The Digital Group's main areas of activity are:
Business and digital technology
Analysing and understanding the impact of the digital revolution on companies. Anticipating developments in management, marketing, communication, and customer relations. Studying new business models and new forms of organisation, together with their risks and opportunities in a globalised economy.
Smart industry
Imagining what digital technology can bring to the factory of the future. Designing new digital tools for collecting, structuring, and analysing data, building on advanced Machine Learning and Deep Learning techniques. Preserving expertise through digital twins and enabling its redeployment across the company.
From Big Data to Smart Data
Collecting, storing, analysing, optimising, and presenting ever-growing volumes of data. Reinventing classic database exploration tools and the way information management is conceived. Designing solutions that take advantage of the latest advances in knowledge representation, scalability techniques, data manipulation, Machine Learning, and Data Mining. Particular attention is paid to applications in tourism.
Computer Human Interaction
Recent advances in Artificial Intelligence open up new prospects for collaboration between machines and human beings. With higher levels of multimodal understanding, machines now integrate computer vision and natural language processing, as well as Smart Materials and the IoT. Creating new interaction media from soft, active materials and e-textiles.
The Digital Group's team of faculty researchers
All publications by the Digital Group's faculty researchers
Abishek Sriramulu; Nicolas Fourrier; Christoph Bergmeir
Adaptive dependency learning graph neural networks Journal Article
In: Information Sciences, vol. 625, pp. 700-714, 2023.
@article{sriramulu_2084,
title = {Adaptive dependency learning graph neural networks},
author = {Abishek Sriramulu and Nicolas Fourrier and Christoph Bergmeir},
url = {https://www.sciencedirect.com/science/article/pii/S002002552201581X?via%3Dihub},
year = {2023},
date = {2023-05-01},
journal = {Information Sciences},
volume = {625},
pages = {700-714},
abstract = {Graph Neural Networks (GNN) have recently gained popularity in the forecasting domain due to their ability to model complex spatial and temporal patterns in tasks such as traffic forecasting and region-based demand forecasting. Most of these methods require a predefined graph as input, whereas in real-life multivariate time series problems, a well-predefined dependency graph rarely exists. This requirement makes it harder for GNNs to be utilised widely for multivariate forecasting problems in other domains such as retail or energy. In this paper, we propose a hybrid approach combining neural networks and statistical structure learning models to self-learn the dependencies and construct a dynamically changing dependency graph from multivariate data, aiming to enable the use of GNNs for multivariate forecasting even when a well-defined graph does not exist. The statistical structure modeling in conjunction with neural networks provides a well-principled and efficient approach by bringing in causal semantics to determine dependencies among the series. Finally, we demonstrate significantly improved performance using our proposed approach on real-world benchmark datasets without a pre-defined dependency graph.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
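To make the approach above concrete, here is a minimal, hypothetical Python sketch of the general idea: estimate a sparse dependency graph from the multivariate series with a simple statistical structure estimator (thresholded partial correlations here, as a stand-in for the paper's structure learning), then propagate the latest observations over that graph for a one-step forecast. The function names and the thresholding rule are our assumptions, not the authors' implementation.

import numpy as np

def learn_dependency_graph(series, threshold=0.3):
    """Estimate a sparse adjacency matrix from a (T x N) multivariate
    series via thresholded partial correlations, a simple stand-in
    for the statistical structure learning described in the paper."""
    precision = np.linalg.pinv(np.cov(series, rowvar=False))
    d = np.sqrt(np.diag(precision))
    partial_corr = -precision / np.outer(d, d)
    np.fill_diagonal(partial_corr, 0.0)
    return (np.abs(partial_corr) > threshold).astype(float)

def graph_propagation_forecast(series, adj):
    """One-step forecast mixing each variable's last value with its
    graph neighbours (one untrained graph-convolution-like step)."""
    weights = (adj + np.eye(adj.shape[0])) / (adj.sum(axis=1, keepdims=True) + 1.0)
    return weights @ series[-1]

rng = np.random.default_rng(0)
x = rng.standard_normal((200, 5)).cumsum(axis=0)  # toy multivariate series
print(graph_propagation_forecast(x, learn_dependency_graph(x)))

In the paper the graph is relearned as data arrives and feeds a trained GNN; this sketch only shows the structure-learning-then-propagation pipeline in its simplest form.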
Guillaume Guérard; Sonia Djebali; Quentin Gabot
Tourists Profiling by Interest Analysis Conference
MaDICS 2022, Lyon, France, 2022.
@conference{guerard_1835,
title = {Tourists Profiling by Interest Analysis},
author = {Guillaume Guérard and Sonia Djebali and Quentin Gabot},
url = {https://www.madics.fr/event/symposium-madics-4/},
year = {2022},
date = {2022-07-01},
booktitle = {MaDICS 2022},
address = {Lyon, France},
abstract = {I am an MSc research student in Computer Science; my research concerns the behaviour of tourists, studied through the digital traces they leave on social media during their travels. Studies conducted on diverse aspects of tourism have focused on the quantitative aspects of digital traces to reach their conclusions. In my work, I propose a study focused on both the qualitative and quantitative aspects of digital traces, to understand the dynamics governing tourist behaviour, especially those concerning attraction networks.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Benjamin Bobbia
Donsker's Theorem for local Data-Driven Empirical Measure Conference
Séminaire du laboratoire Modal X, Nanterre, France, 2022.
@conference{bobbia_1773,
title = {Donsker's Theorem for local Data-Driven Empirical Measure},
author = {Benjamin Bobbia},
year = {2022},
date = {2022-02-01},
booktitle = {Séminaire du laboratoire Modal X},
address = {Nanterre, France},
abstract = {Nowadays, empirical processes are well-known objects. One reason to push their study forward is that, in many models, we can write estimators as images of empirical measures. In this work we investigate the case of local empirical measures, built over a sub-sample of data conditioned to lie in a certain area. Numerous results exist for such questions, but what can we say if the area of interest is data-driven? In the present work we introduce a general framework which allows asymptotic results to be derived for these particular empirical measures (at "low" cost in terms of technicality and assumptions). We then use this approach in the framework of extreme value theory, deriving tail and quantile estimations.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
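To fix ideas, here is a hedged LaTeX sketch of the object this abstract describes, with notation that is ours rather than the speaker's: given i.i.d. observations and a data-driven region (for instance, the tail region above an order statistic in extreme value theory), the local empirical measure and the Donsker-type statement take roughly the following shape.

% Illustrative notation only; requires amssymb for \rightsquigarrow.
% X_1, ..., X_n i.i.d.; \hat{A}_n a data-driven region with deterministic limit A.
\[
  \mathbb{P}_n^{\mathrm{loc}} f
  = \frac{1}{N_n} \sum_{i=1}^{n} f(X_i)\,\mathbf{1}\{X_i \in \hat{A}_n\},
  \qquad
  N_n = \sum_{i=1}^{n} \mathbf{1}\{X_i \in \hat{A}_n\},
\]
\[
  \sqrt{N_n}\,\bigl(\mathbb{P}_n^{\mathrm{loc}} - P(\,\cdot \mid A)\bigr)
  \rightsquigarrow \mathbb{G}
  \quad \text{in } \ell^\infty(\mathcal{F}),
\]
% where \mathbb{G} is a Gaussian process indexed by a Donsker class \mathcal{F}.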
Benjamin Bobbia
Donsker's Theorem for local data-driven empirical measures Conference
Forum Jeunes Mathématiciens, Besançon, France, 2021.
@conference{bobbia_1772,
title = {Donsker's Theorem for local data-driven empirical measures},
author = {Benjamin Bobbia},
url = {https://femmes-et-maths.fr/2021/06/28/forum-2021-des-jeunes-mathematien-nes/},
year = {2021},
date = {2021-12-01},
booktitle = {Forum Jeunes Mathématiciens},
address = {Besançon, France},
abstract = {Nowadays, empirical processes are well-known objects. One reason to push their study forward is that, in many models, we can write estimators as images of empirical measures. In this work we investigate the case of local empirical measures, that is, empirical measures built over a sub-sample of data conditioned to lie in a certain area. Numerous results exist for such measures, but what can we say if the area is data-driven? This situation can be handled, but at a high technical cost and under additional assumptions. The main aim of the present work is to introduce a general framework which allows asymptotic results to be derived for these particular empirical measures at lower cost (in terms of technicality and assumptions). We give a Donsker-type theorem for such measures and present some domains of application, such as extreme value theory and multivariate regular variation.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Nicholas I-Hsien Kuo; Mehrtash Harandi; Nicolas Fourrier; Christian Walder; Gabriela Ferraro; Hanna Suominen
Learning to Continually Learn Rapidly from Few and Noisy Data Conference
AAAI Workshop on Meta-Learning and MetaDL Challenge, online, 2021.
@conference{i-hsien_kuo_1764,
title = {Learning to Continually Learn Rapidly from Few and Noisy Data},
author = {Nicholas I-Hsien Kuo and Mehrtash Harandi and Nicolas Fourrier and Christian Walder and Gabriela Ferraro and Hanna Suominen},
url = {https://proceedings.mlr.press/v140/kuo21a.html},
year = {2021},
date = {2021-04-01},
booktitle = {AAAI Workshop on Meta-Learning and MetaDL Challenge},
pages = {140:65-76},
address = {online},
abstract = {Neural networks suffer from catastrophic forgetting and are unable to sequentially learn new tasks without guaranteed stationarity in data distribution. Continual learning could be achieved via replay - by concurrently training on externally stored old data while learning a new task. However, replay becomes less effective when each past task is allocated less memory. To overcome this difficulty, we supplemented replay mechanics with meta-learning for rapid knowledge acquisition. By employing a meta-learner, which learns a learning rate per parameter per past task, we found that base learners produced strong results when less memory was available. Additionally, our approach inherited several meta-learning advantages for continual learning: it demonstrated strong robustness when continually learning in the presence of noise and brought base learners to higher accuracy in fewer updates.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
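As a rough illustration of the update scheme sketched in this abstract, the minimal Python fragment below applies, in one step, the gradient of the new task plus replayed gradients of past tasks, each scaled element-wise by its own per-parameter learning rate. Those rates are taken as given here, whereas the paper meta-learns them; all names are our assumptions.

import numpy as np

def replay_update(theta, grad_new, replay_grads, base_lr, task_lrs):
    """One replay step: each past task t contributes a gradient scaled
    element-wise by its own learned per-parameter rate task_lrs[t]
    (how those rates are meta-trained is out of scope here)."""
    step = base_lr * grad_new
    for g_t, lr_t in zip(replay_grads, task_lrs):
        step += lr_t * g_t  # per-parameter, per-task learning rate
    return theta - step

theta = np.zeros(4)
grad_new = np.array([0.5, -0.2, 0.1, 0.0])
replay_grads = [np.array([0.1, 0.1, -0.3, 0.2])]   # one past task
task_lrs = [np.array([0.01, 0.05, 0.02, 0.0])]     # assumed meta-learned
print(replay_update(theta, grad_new, replay_grads, 0.1, task_lrs))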
Shameek Sinha
Beyond CRM, Customer Portfolio Management Builds Long Term Customer Value Miscellaneous
Forbes Magazine, 2021.
@misc{sinha_1992,
title = {Beyond CRM, Customer Portfolio Management Builds Long Term Customer Value},
author = {Shameek Sinha},
url = {https://www.forbes.com/sites/garydrenik/2021/04/14/beyond-crm-customer-portfolio-management-builds-long-term-customer-value/?sh=3bb9f6d92b2c},
year = {2021},
date = {2021-04-01},
howpublished = {Forbes Magazine},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Nicholas I-Hsien Kuo; Mehrtash Harandi; Nicolas Fourrier; Christian Walder; Gabriela Ferraro; Hanna Suominen
M2SGD: Learning to Learn Important Weights Conference
IEEE Conference on Computer Vision and Pattern Recognition Workshops, Seattle, USA, 2020.
@conference{i-hsien_kuo_1765,
title = {M2SGD: Learning to Learn Important Weights},
author = {Nicholas I-Hsien Kuo and Mehrtash Harandi and Nicolas Fourrier and Christian Walder and Gabriela Ferraro and Hanna Suominen},
url = {https://ieeexplore.ieee.org/document/9150875},
year = {2020},
date = {2020-06-01},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition Workshops},
address = {Seattle, USA},
abstract = {Meta-learning concerns rapid knowledge acquisition. One popular approach casts optimisation as a learning problem, and it has been shown that learnt neural optimisers update base learners more quickly than their handcrafted counterparts. In this paper, we learn an optimisation rule that sparsely updates the learner parameters and removes redundant weights. We present Masked Meta-SGD (M2SGD), a neural optimiser which is not only capable of updating learners quickly, but also capable of removing 83.71% of the weights for ResNet20s. We release our code at https://github.com/Nic5472K/CLVISION2020_CVPR_M2SGD},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
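The gist of a masked update, as we read this abstract, can be shown in a few lines of Python: a binary mask gates which parameters receive updates, and persistently masked weights become candidates for pruning. The mask is supplied by hand here, whereas M2SGD learns it with a neural optimiser; the names are hypothetical.

import numpy as np

def masked_sgd_step(theta, grad, lr, mask):
    """Update only the parameters selected by the binary mask;
    entries with mask 0 stay frozen and are candidates for pruning."""
    return theta - lr * mask * grad

theta = np.array([0.4, -0.7, 0.2, 0.9])
grad = np.array([0.1, -0.3, 0.2, 0.05])
mask = np.array([1.0, 0.0, 1.0, 0.0])  # assumed learned; 0 = frozen/prunable
print(masked_sgd_step(theta, grad, 0.1, mask))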
Nicholas I-Hsien Kuo; Mehrtash Harandi; Nicolas Fourrier; Christian Walder; Gabriela Ferraro; Hanna Suominen
An Input Residual Connection for Simplifying Gated Recurrent Neural Networks Proceedings Article
In: IEEE International Joint Conference on Neural Networks, Glasgow, UK, 2020.
@inproceedings{i-hsien_kuo_1763,
title = {An Input Residual Connection for Simplifying Gated Recurrent Neural Networks},
author = {Nicholas I-Hsien Kuo and Mehrtash Harandi and Nicolas Fourrier and Christian Walder and Gabriela Ferraro and Hanna Suominen},
url = {https://ieeexplore.ieee.org/document/9207238},
year = {2020},
date = {2020-07-01},
booktitle = {IEEE International Joint Conference on Neural Networks},
address = {Glasgow, UK},
abstract = {Gated Recurrent Neural Networks (GRNNs) are important models that continue to push the state-of-the-art solutions across different machine learning problems. However, they are composed of intricate components that are generally not well understood. We increase GRNN interpretability by linking the canonical Gated Recurrent Unit (GRU) design to the well-studied Hopfield network. This connection allowed us to identify network redundancies, which we simplified with an Input Residual Connection (IRC). We tested GRNNs against their IRC counterparts on language modelling. In addition, we proposed an Input Highway Connection (IHC) as an advanced application of the IRC and then evaluated the most widely applied GRNN, the Long Short-Term Memory (LSTM), and the IHC-LSTM on the tasks of i) image generation and ii) learning to learn to update another learner-network. Despite parameter reductions, all IRC-GRNNs showed generalisation comparable or superior to their baseline models. Furthermore, compared to the LSTM, the IHC-LSTM removed 85.4% of parameters on image generation. In conclusion, the IRC is applicable not only to the GRNN designs of GRUs and LSTMs, but also to FastGRNNs, Simple Recurrent Units (SRUs), and Strongly-Typed Recurrent Neural Networks (T-RNNs).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
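As a hedged reading of this abstract, the sketch below shows a GRU-style cell in which the candidate state reuses the raw input in place of a learned input transformation, removing one weight matrix. This is only our interpretation of an Input Residual Connection, not the paper's exact equations, and it assumes the input and hidden dimensions match.

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def irc_gru_cell(x, h, Wz, Uz, Wr, Ur, Uh):
    """GRU-style cell where the candidate adds the raw input x directly
    (the input residual), instead of a learned projection of x."""
    z = sigmoid(x @ Wz + h @ Uz)         # update gate
    r = sigmoid(x @ Wr + h @ Ur)         # reset gate
    h_tilde = np.tanh(x + (r * h) @ Uh)  # candidate: raw input as residual
    return (1.0 - z) * h + z * h_tilde

d = 3
rng = np.random.default_rng(1)
x, h = rng.standard_normal(d), np.zeros(d)
Wz, Uz, Wr, Ur, Uh = (rng.standard_normal((d, d)) * 0.1 for _ in range(5))
print(irc_gru_cell(x, h, Wz, Uz, Wr, Ur, Uh))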
N'hésitez pas à contacter le service des admissions pour tout renseignement complémentaire :