% ============================================================
% Publications of Jörg Frochte
% Last updated: 2026-03-20
% ============================================================

% ---- 2026 ----

@inproceedings{mohr2026gbkan,
  author    = {Mohr, Janis and Frochte, J{\"o}rg},
  title     = {{GB-KAN: Gradient Boosting with Interpretable Kolmogorov-Arnold Networks}},
  booktitle = {18th International Conference on Agents and Artificial Intelligence (ICAART 2026)},
  volume    = {3},
  pages     = {2421--2432},
  year      = {2026},
  doi       = {10.5220/0014246800004052},
  url       = {download/Mohr2026_GBKAN.pdf},
  keywords  = {xai, continual-learning},
  isbn      = {978-989-758-796-2},
  confrank  = {CORE C, Qualis B4}
}

@misc{sandfuchs2026responsibleai,
  author    = {Sandfuchs, Stephan and Farooghi, Diako and Mohr, Janis and Grewe, Sarah and Lemmen, Markus and Frochte, J{\"o}rg},
  title     = {{Responsible AI in Business}},
  year      = {2026},
  eprint    = {2602.13244},
  archiveprefix = {arXiv},
  url       = {https://arxiv.org/abs/2602.13244},
  keywords  = {preprint, xai, responsible-ai}
}

% ---- 2025 ----

@inproceedings{sandfuchs2025llms,
  author    = {Sandfuchs, Stephan and Melchert, Maximilian and Frochte, J{\"o}rg},
  title     = {{LLMs for Text-Based Exploration and Navigation under Partial Observability}},
  booktitle = {International Conference on Advancements in Automation, Robotics and Sensing (ICAARS 2025)},
  series    = {Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering (LNICST)},
  publisher = {Springer},
  year      = {2025},
  note      = {Accepted, to be published in 2026},
  abstract  = {Exploration and goal-directed navigation in unknown layouts are central to inspection, logistics, and search-and-rescue. We ask whether large language models (LLMs) can function as text-only controllers under partial observability -- without code execution, tools, or program synthesis. We introduce a reproducible benchmark with oracle localisation in fixed ASCII gridworlds: each step reveals only a local 5x5 window around the agent and the model must select one of UP/RIGHT/DOWN/LEFT. Nine contemporary LLMs ranging from open/proprietary, dense / Mixture of Experts and instruction- vs. reasoning-tuned are evaluated on two tasks across three layouts of increasing difficulty: Exploration (maximising revealed cells) and Navigation (reach the goal on the shortest path). Reasoning-tuned models reliably complete navigation across all layouts, yet remain less efficient than oracle paths. Few-shot demonstrations in the prompt chiefly help these reasoning-tuned models by reducing invalid moves and shortening paths, while classic dense instruction models remain inconsistent. We observe characteristic action priors (UP/RIGHT) that can induce looping under partial observability. Overall, training regimen and test-time deliberation predict control ability better than raw parameter count. These findings suggest lightweight hybridisation with classical online planners as a practical route to deployable partial map systems.},
  keywords  = {robotics, llm}
}

@inproceedings{tousside2025esecl,
  author    = {Tousside, Basile and Meisen, Tobias and Frochte, J{\"o}rg},
  title     = {{Dynamic Capacity Expansion in Continual Learning: The eSECL Approach}},
  booktitle = {16th International Conference on Agents and Artificial Intelligence (ICAART 2024)},
  address   = {Rome},
  year      = {2025},
  doi       = {10.1007/978-3-031-87327-0_13},
  url       = {download/Tousside2025_eSECL_preprint.pdf},
  abstract  = {Humans excel at adapting to constantly changing environments, while artificial neural networks (ANNs) struggle with forgetting under dynamic conditions. Continual learning (CL) research aims to address this issue by enabling neural networks to sequentially acquire new knowledge, balancing stability (retaining past tasks) and plasticity (adapting to new tasks). Recently, promising work has emerged in this area. Notably, Sparsification and Expansion for CL (SECL) introduces a robust framework that sparsely uses a neural network's available capacity and expands it when this capacity is exhausted. We propose an enhanced version of SECL, dubbed eSECL, which addresses two key weaknesses of the original SECL algorithm. First, SECL was primarily investigated using convolutional neural networks (CNNs). Second, it does not properly address how much capacity to add when growing a network. We address the first issue by generalizing the algorithm to both dense neural networks and CNNs. For the second issue, we introduce a robust heuristic that monitors the model capacity at each layer and determines the necessary capacity to add. Experiments on popular CL datasets demonstrate the superiority of eSECL over state-of-the-art methods.},
  keywords  = {continual-learning},
  isbn      = {978-3-031-87327-0},
  confrank  = {CORE C, Qualis B4},
  internal-note = {NOTE(review): booktitle says ICAART 2024 but year is 2025 and the DOI is a Springer book chapter -- presumably the revised selected papers volume; verify entry type/venue data}
}

@inproceedings{mohr2025mann,
  author    = {Mohr, Janis and Frochte, J{\"o}rg},
  title     = {{Multiple Additive Neural Networks for Structured and Unstructured Data}},
  booktitle = {15th International Joint Conference on Computational Intelligence (IJCCI 2023)},
  address   = {Rome},
  year      = {2025},
  doi       = {10.1007/978-3-031-85252-7_10},
  abstract  = {This paper extends and explains the Multiple Additive Neural Networks (MANN) methodology, an enhancement to the traditional Gradient Boosting framework, utilizing nearly shallow neural networks instead of decision trees as base learners. This innovative approach leverages neural network architectures, notably Convolutional Neural Networks (CNNs) and Capsule Neural Networks, to extend its application to both structured and unstructured data such as images and audio. For structured data the advantages of capsule neural networks as feature extractors are used and combined with MANN as a classifier. MANN's unique architecture promotes continuous learning and integrates advanced heuristics to combat overfitting, ensuring robustness and reducing sensitivity to hyperparameter settings like learning rate and iterations. Our empirical studies reveal that MANN surpasses traditional methods such as Extreme Gradient Boosting (XGB) in accuracy across well-known datasets. This research demonstrates MANN's superior precision and generalizability, making it a versatile tool for diverse data types and complex learning environments.},
  keywords  = {continual-learning, xai},
  isbn      = {978-3-031-85252-7},
  confrank  = {CORE B (IJCCI), Springer SCI},
  internal-note = {NOTE(review): booktitle says IJCCI 2023 but year is 2025 and the DOI is a Springer book chapter -- presumably the revised selected papers volume; verify entry type/venue data}
}

% ---- 2024 ----

@inproceedings{bockermann2024ki,
  author    = {Bockermann, Christian and Frochte, J{\"o}rg and Schilberg, Daniel},
  title     = {{Angewandte K{\"u}nstliche Intelligenz im Verbund verschiedener Fachbereiche}},
  booktitle = {INFORMATIK 2024},
  address   = {Wiesbaden},
  year      = {2024},
  doi       = {10.18420/inf2024_192},
  url       = {download/Bockermann2024_KI_Verbund.pdf},
  abstract  = {Das Interdisziplinaere Institut fuer Angewandte Kuenstliche Intelligenz und Data Science (AKIS) ist die zentrale Anlaufstelle fuer datenorientierte Forschung, Lehre und Promotionen an der Hochschule Bochum. In diesem Beitrag stellen wir die Zielsetzungen und Struktur des Instituts, seine Rolle im Rahmen des Hochschulentwicklungsplans und die bisherigen Aktivitaeten und Projekte vor, die im AKIS seit Gruendung des Instituts an der Hochschule Bochum zusammengelaufen sind.},
  keywords  = {interdisciplinary},
  isbn      = {978-3-88579-746-3},
  confrank  = {national}
}

@inproceedings{neugebauer2024efficient,
  author    = {Neugebauer, Malte and Erlebach, Ralf and Kaufmann, Christof and Mohr, Janis and Frochte, J{\"o}rg},
  title     = {{Efficient Learning Processes by Design: Analysis of Usage Patterns in Differently Designed Digital Self-Learning Environments}},
  booktitle = {16th International Conference on Computer Supported Education (CSEDU 2024)},
  address   = {Angers},
  year      = {2024},
  doi       = {10.5220/0012558200003693},
  url       = {https://www.hochschule-bochum.de/fileadmin/public/Die-BO_Hochschule/campus_VH/Labore_und_AGs/AGMathematikAngInformatik/Digitales_Mentoring/Neugebauer_Efficient_Learning_Processes_By_Design.pdf},
  keywords  = {edm},
  isbn      = {978-989-758-697-2},
  confrank  = {CORE B}
}

@inproceedings{tousside2024cnns,
  author    = {Tousside, Basile and Frochte, J{\"o}rg and Meisen, Tobias},
  title     = {{CNNs Sparsification and Expansion for Continual Learning}},
  booktitle = {16th International Conference on Agents and Artificial Intelligence (ICAART 2024)},
  volume    = {2},
  pages     = {110--120},
  address   = {Rome},
  year      = {2024},
  doi       = {10.5220/0012314000003636},
  url       = {https://www.scitepress.org/publishedPapers/2024/123140/pdf/index.html},
  keywords  = {continual-learning},
  isbn      = {978-989-758-680-4},
  confrank  = {CORE C, Qualis B4}
}

@inproceedings{asatyran2024expectations,
  author    = {Asatyran, H. and Tousside, Basile and Mohr, Janis and Neugebauer, Malte and Frochte, J{\"o}rg and Bijl, Hildo},
  title     = {{Exploring Student Expectations and Confidence in Learning Analytics}},
  booktitle = {14th Learning Analytics and Knowledge Conference (LAK 2024)},
  address   = {Kyoto},
  year      = {2024},
  doi       = {10.1145/3636555.3636923},
  url       = {https://arxiv.org/abs/2601.05082},
  keywords  = {selected, edm},
  isbn      = {979-8-400-71618-8},
  confrank  = {CORE A},
  internal-note = {NOTE(review): surname spelling "Asatyran" and abbreviated given name "H." -- verify against the published paper (possibly "Asatryan") and store the full given name if known}
}

@inproceedings{elstner2024classification,
  author    = {Elstner, Theresa and Hanle, B. and Loebe, Frank and Fr{\"o}be, Maik and Kolyada, Nikolay and Mohr, Janis and Stein, Benno and Potthast, Martin and Frochte, J{\"o}rg},
  title     = {{Classification of Shared Tasks Used in Teaching}},
  booktitle = {29th ACM Conference on Innovation and Technology in Computer Science Education (ITiCSE 2024)},
  address   = {Milan},
  year      = {2024},
  doi       = {10.1145/3649217.3653559},
  url       = {https://downloads.webis.de/publications/papers/elstner_2024.pdf},
  keywords  = {teaching},
  isbn      = {979-8-400-70600-4},
  confrank  = {CORE B}
}

% ---- 2023 ----

@inproceedings{neugebauer2023gamifiziert,
  author    = {Neugebauer, Malte and Frochte, J{\"o}rg},
  title     = {{Steigerung von Lernerfolg und Motivation durch gamifizierte Mathematik-Aufgaben in Lernmanagementsystemen}},
  booktitle = {21. Fachtagung Bildungstechnologien (DELFI)},
  pages     = {247--248},
  year      = {2023},
  doi       = {10.18420/delfi2023-39},
  url       = {https://www.hochschule-bochum.de/fileadmin/public/Die-BO_Hochschule/campus_VH/Labore_und_AGs/AGMathematikAngInformatik/Digitales_Mentoring/Neugebauer_Frochte_Steigerung_von_Motivation_und_Lernerfolg_mit_gamifizierten_Mathematik_Aufgaben_in_Lernmanagementsystemen.pdf},
  keywords  = {edm},
  isbn      = {978-3-88579-732-6},
  confrank  = {national}
}

@incollection{mohr2023oneshot,
  author    = {Mohr, Janis and Frochte, J{\"o}rg},
  title     = {{One-Shot Identification with Different Neural Network Approaches}},
  booktitle = {Studies in Computational Intelligence},
  publisher = {Springer},
  volume    = {1119},
  pages     = {205--222},
  year      = {2023},
  doi       = {10.1007/978-3-031-46221-4_10},
  url       = {https://arxiv.org/abs/2601.08278},
  keywords  = {applied-ml},
  isbn      = {978-3-031-46221-4},
  confrank  = {CORE B (IJCCI), Springer SCI}
}

@inproceedings{mohr2023mann,
  author    = {Mohr, Janis and Tousside, Basile and Schmidt, Marco and Frochte, J{\"o}rg},
  title     = {{Multiple Additive Neural Networks: A Novel Approach to Continuous Learning in Regression and Classification}},
  booktitle = {15th International Conference on Neural Computation Theory and Applications (NCTA 2023)},
  year      = {2023},
  doi       = {10.5220/0012234000003595},
  url       = {download/Mohr2023_MANN.pdf},
  abstract  = {Gradient Boosting is one of the leading techniques for the regression and classification of structured data. Recent adaptations and implementations use decision trees as base learners. In this work, a new method based on the original approach of Gradient Boosting was adapted to nearly shallow neural networks as base learners. The proposed method supports a new architecture-based approach for continuous learning and utilises strong heuristics against overfitting. Therefore, the method that we call Multiple Additive Neural Networks (MANN) is robust and achieves high accuracy. As shown by our experiments, MANN obtains more accurate predictions on well-known datasets than Extreme Gradient Boosting (XGB), while also being less prone to overfitting and less dependent on the selection of the hyperparameters learn rate and iterations.},
  keywords  = {continual-learning, xai},
  isbn      = {978-989-758-674-3},
  confrank  = {CORE B}
}

@inproceedings{elstner2023sharedtasks,
  author    = {Elstner, Theresa and Loebe, Frank and Ajjour, Yamen and Akiki, Christopher and Bondarenko, Alexander and Fr{\"o}be, Maik and Gienapp, Lukas and Kolyada, Nikolay and Mohr, Janis and Sandfuchs, Stephan and Wiegmann, Matti and Ferro, Nicola and Hofmann, Sven and Stein, Benno and Hagen, Matthias and Potthast, Martin and Frochte, J{\"o}rg},
  title     = {{Shared Tasks as Tutorials: A Methodical Approach}},
  booktitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume    = {37},
  number    = {13},
  pages     = {15807--15815},
  year      = {2023},
  doi       = {10.1609/aaai.v37i13.26877},
  url       = {https://ojs.aaai.org/index.php/AAAI/article/view/26877/26649},
  keywords  = {selected, teaching},
  issn      = {2159-5399},
  confrank  = {CORE A*, Qualis A1}
}

@inproceedings{neugebauer2023success,
  author    = {Neugebauer, Malte and Tousside, Basile and Frochte, J{\"o}rg},
  title     = {{Success Factors for Mathematical e-Learning Exercises Focusing First-Year Students}},
  booktitle = {15th International Conference on Computer Supported Education (CSEDU 2023)},
  volume    = {2},
  pages     = {306--317},
  year      = {2023},
  doi       = {10.5220/0011858400003470},
  url       = {download/SuccessFactors4MathematicalE-LearningExercisesFocusingFirst-YearStudents.pdf},
  abstract  = {How university students succeed in math courses at the beginning of their studies is of great relevance for the overall study success in many study programs. Since the competence levels of candidates are different, lecturers struggle to mediate knowledge to such heterogeneous audiences simultaneously. In tacit consent, a catch-up of lower-skilled students is expected. Self-organized learning materials -- which are often accessible via e-learning -- are mostly unattractive, especially to lower-skilled students. Since gamification is successfully used in other areas of education to support motivation and performance, we propose gamification as a first success factor for mathematical exercises. Considering infrastructural aspects of higher education, we furthermore suggest the gamification systems' ability to be extended by lecturers, its integrability into universities learning management systems and its affordability as success factors for mathematical e-learning exercises.},
  keywords  = {edm},
  isbn      = {978-989-758-641-5},
  confrank  = {CORE B}
}

% ---- 2022 ----

@inproceedings{tousside2022explainability,
  author    = {Tousside, Basile and Dama, Yashwanth and Frochte, J{\"o}rg},
  title     = {{Towards Explainability in Modern Educational Data Mining: A Survey}},
  booktitle = {14th International Joint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge Management},
  pages     = {212--220},
  year      = {2022},
  doi       = {10.5220/0011529400003335},
  url       = {download/TowardsExplainabilityInModernEducationalDataMiningASurvey.pdf},
  abstract  = {Data mining has become an integral part of many educational systems, where it provides the ability to explore hidden relationship in educational data as well as predict students' academic achievements. However, the proposed techniques to achieve these goals, referred to as educational data mining (EDM) techniques, are mostly not explainable. This means that the system is black-boxed and offers no insight regarding the understanding of its decision making process. In this paper, we propose to delve into explainability in the EDM landscape. We analyze the current state-of-the-art method in EDM, empirically scrutinize their strengths and weaknesses regarding explainability and making suggestions on how to make them more explainable and more trustworthy.},
  keywords  = {selected, edm, xai},
  isbn      = {978-989-758-614-9},
  confrank  = {CORE B}
}

@inproceedings{tousside2022sparse,
  author    = {Tousside, Basile and Mohr, Janis and Frochte, J{\"o}rg},
  title     = {{Group and Exclusive Sparse Regularization-based Continual Learning of CNNs}},
  booktitle = {Canadian Conference on Artificial Intelligence},
  year      = {2022},
  doi       = {10.21428/594757db.b7e2fbf3},
  url       = {https://arxiv.org/abs/2601.03658},
  note      = {Best Paper Award},
  keywords  = {selected, continual-learning},
  confrank  = {CORE B}
}

@inproceedings{quetscher2022capsule,
  author    = {Quetscher, Felizia and Kaufmann, Christof and Frochte, J{\"o}rg},
  title     = {{Investigation of Capsule Networks Regarding their Potential of Explainability and Image Rankings}},
  booktitle = {14th International Conference on Agents and Artificial Intelligence (ICAART 2022)},
  volume    = {3},
  pages     = {343--351},
  year      = {2022},
  doi       = {10.5220/0010821600003116},
  url       = {https://www.scitepress.org/Papers/2022/108216/108216.pdf},
  abstract  = {Explainable Artificial Intelligence (AI) is a long-ranged goal, which can be approached from different viewpoints. One way is to simplify the complex AI model into an explainable one, another way uses post-processing to highlight the most important input features for the classification. In this work, we focus on the explanation of image classification using capsule networks with dynamic routing. We train a capsule network on the EMNIST letter dataset and examine the model regarding its explanatory potential. We show that the length of the class specific vectors (squash vectors) of the capsule network can be interpreted as predicted probability and it correlates with the agreement between the decoded image and the original image.},
  keywords  = {xai, continual-learning},
  isbn      = {978-989-758-547-0},
  confrank  = {CORE C, Qualis B4}
}

@incollection{sandfuchs2022slam,
  author    = {Sandfuchs, Stephan and Schmidt, Marco and Frochte, J{\"o}rg},
  title     = {{Novel Approaches for Periodic Depth Enhancement in Visual SLAM}},
  booktitle = {Advances in Service and Industrial Robotics},
  publisher = {Springer},
  pages     = {436--443},
  year      = {2022},
  doi       = {10.1007/978-3-031-04870-8_51},
  abstract  = {Most visual simultaneous localization and mapping (SLAM) systems use either monocular or stereo sensor information. However, in some situations, depth information may not be available for each camera frame. This work presents two novel approaches by enhancing either each n_k-th keyframe or each n_f-th camera frame periodically with depth information. The experimental results on the KITTI visual odometry benchmark show that both approaches improve scale drift compared to monocular SLAM. Even if only a small number of camera frames is enhanced with depth information (10 to 20 percent), this approach achieves comparable or more accurate results than stereo trajectories.},
  keywords  = {robotics},
  isbn      = {978-3-031-04870-8},
  confrank  = {Springer RAAD}
}

@inproceedings{tousside2022treecnn,
  author    = {Tousside, Basile and Friedrichsen, Lukas and Frochte, J{\"o}rg},
  title     = {{Towards Robust Continual Learning using an Enhanced Tree-CNN}},
  booktitle = {14th International Conference on Agents and Artificial Intelligence (ICAART 2022)},
  volume    = {3},
  year      = {2022},
  doi       = {10.5220/0010819800003116},
  url       = {download/Towards_Robust_Continual_Learning_Using_an_Enhanced_Tree_CNN.pdf},
  abstract  = {The ability to perform continual learning and the adaption to new tasks without losing the knowledge already acquired is still a problem that current machine learning models do not address well. In this paper we discuss a simple but effective approach based on a Tree-CNN architecture. It allows knowledge transfer from past task when learning a new task, which maintains the model compact despite network expansion. Second, it avoids forgetting, i.e., learning new tasks without forgetting previous tasks. Third, it is cheap to train, to evaluate and requires less memory compared to a single monolithic model.},
  keywords  = {continual-learning, xai},
  isbn      = {978-989-758-547-0},
  confrank  = {CORE C, Qualis B4}
}

% ---- 2021 ----

@inproceedings{mohr2021capsule,
  author    = {Mohr, Janis and Tousside, Basile and Schmidt, Marco and Frochte, J{\"o}rg},
  title     = {{Explainability and Continuous Learning with Capsule Networks}},
  booktitle = {13th International Joint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge Management (IC3K 2021)},
  volume    = {1},
  pages     = {264--273},
  year      = {2021},
  doi       = {10.5220/0010681300003064},
  url       = {https://www.scitepress.org/Papers/2021/106813/106813.pdf},
  abstract  = {Capsule networks are an emerging technique for image recognition and classification tasks with innovative approaches inspired by the human visual cortex. In this work, it is shown that capsule networks can generate image descriptions representing detected objects in images. This visualisation in combination with reconstructed images delivers strong and easily understandable explainability regarding the decision-making process of capsule networks and leading towards trustworthy AI. Furthermore it is shown that capsule networks can be used for continuous learning utilising already learned basic geometric shapes to learn more complex objects.},
  keywords  = {continual-learning, xai},
  isbn      = {978-989-758-533-3},
  confrank  = {CORE B}
}

@inproceedings{mohr2021oneshot,
  author    = {Mohr, Janis and Breidenbach, Finn and Frochte, J{\"o}rg},
  title     = {{An Approach to One-shot Identification with Neural Networks}},
  booktitle = {13th International Joint Conference on Computational Intelligence (IJCCI 2021)},
  pages     = {344--351},
  year      = {2021},
  doi       = {10.5220/0010684300003063},
  url       = {https://www.scitepress.org/Papers/2021/106843/106843.pdf},
  abstract  = {In order to optimise products and comprehend product defects, the production process must be traceable. Machine learning techniques are a modern approach, which can be used to recognise a product in every production step. In this paper an approach to identify objects, which have only been seen once, is proposed. The proposed approach is for applications in production comparable with existing solutions based on siamese networks regarding the accuracy. Furthermore, it is a lightweight architecture with some advantages regarding computation cost in the online prediction use case of some industrial applications.},
  keywords  = {applied-ml},
  isbn      = {978-989-758-534-0},
  confrank  = {CORE B}
}

% ---- 2020 ----

@inproceedings{marsland2020regression,
  author    = {Marsland, Stephen and Frochte, J{\"o}rg},
  title     = {{Regression Learning on Patches}},
  booktitle = {2020 IEEE Symposium Series on Computational Intelligence (SSCI)},
  pages     = {1786--1793},
  year      = {2020},
  doi       = {10.1109/SSCI47803.2020.9308514},
  url       = {download/Regression_learning_on_patches.pdf},
  abstract  = {Neural networks often do poorly at representing discontinuous functions, or even just functions with rapid transitions in the response surface between closely-spaced points in feature space. We introduce an algorithm to partition the data that is inspired by Finite Element Tearing and Interconnecting. Using an implementation based on a decision tree with neural networks at the leaves, we demonstrate our approach for regression learning on patches of the feature space. We use both artificial and real-world datasets to show that, in some use cases, this method can outperform conventional neural networks that see the entire feature set in the original training.},
  keywords  = {selected, ill-posed},
  isbn      = {978-1-7281-2547-3},
  confrank  = {CORE B}
}

@incollection{lemmen2020integration,
  author    = {Lemmen, Markus and Schmidt, Marco and Frochte, J{\"o}rg},
  title     = {{Concerning the Integration of Machine Learning Content in Mechatronics Curricula}},
  booktitle = {Revolutionizing Education in the Age of AI and Machine Learning},
  publisher = {IGI Global},
  pages     = {75--96},
  year      = {2020},
  doi       = {10.4018/978-1-5225-7793-5.ch004},
  keywords  = {teaching},
  isbn      = {978-1-5225-7793-5},
  confrank  = {IGI Global book chapter}
}

@incollection{tousside2020robot,
  author    = {Tousside, Basile and Mohr, Janis and Schmidt, Marco and Frochte, J{\"o}rg},
  title     = {{A Learning Approach for Optimizing Robot Behavior Selection Algorithm}},
  booktitle = {Intelligent Robotics and Applications (ICIRA 2020)},
  series    = {Lecture Notes in Computer Science},
  volume    = {12595},
  pages     = {171--183},
  publisher = {Springer},
  year      = {2020},
  doi       = {10.1007/978-3-030-66645-3_15},
  url       = {download/2020LearningApproach4OptimizingRobotBehavior.pdf},
  abstract  = {Algorithms are the heart of each robotics system. A specific class of algorithm embedded in robotics systems is the so-called behavior -- or action -- selection algorithm. These algorithms select an action a robot should take, when performing a specific task. In this paper, we show how this problem can be addressed with supervised learning techniques. Our method starts by learning the algorithm behavior from the parameter space according to environment features, then bootstrap itself into a more robust framework capable of self-adjusting robot parameters in real-time.},
  keywords  = {robotics, applied-ml},
  isbn      = {978-3-030-66645-3},
  confrank  = {Springer LNCS (ICIRA)}
}

% ---- 2019 ----

@incollection{marsland2019illposed,
  author    = {Marsland, Stephen and Frochte, J{\"o}rg},
  title     = {{A Learning Approach for Ill-Posed Optimisation Problems}},
  booktitle = {Data Mining (AusDM 2019)},
  series    = {Communications in Computer and Information Science},
  volume    = {1127},
  pages     = {16--27},
  publisher = {Springer},
  year      = {2019},
  doi       = {10.1007/978-981-15-1699-3_2},
  url       = {download/ausdm2019.pdf},
  note      = {Best Paper Award},
  abstract  = {Supervised learning can be thought of as finding a mapping between spaces of input and output vectors. In the case that the function to be learned is multi-valued (so that there are several correct output values for a given input) the problem becomes ill-posed, and many standard methods fail to find good solutions. We present an analysis of this problem, together with an approach based on k-nearest neighbours, which we demonstrate on a set of simple examples, including two application areas of interest.},
  keywords  = {ill-posed},
  isbn      = {978-981-15-1699-3},
  confrank  = {CORE B}
}

@inproceedings{boerzel2019casestudy,
  author    = {B{\"o}rzel, S. and Frochte, J{\"o}rg},
  title     = {{Case Study on Model-based Application of Machine Learning Using Small CAD Databases for Cost Estimation}},
  booktitle = {11th International Joint Conference on Knowledge Discovery, Knowledge Engineering and Knowledge Management (KDIR 2019)},
  volume    = {1},
  pages     = {258--265},
  address   = {Vienna},
  year      = {2019},
  doi       = {10.5220/0007979802580265},
  url       = {download/CaseStudyCAE2019.pdf},
  abstract  = {In many industries, the development is aimed towards Industry 4.0, which is accompanied by a movement from large to small quantities of individually adapted products in a multitude of variants. In this paper, we present an approach for such use cases that combines the advantages of model-based approaches with modern machine learning techniques, as well as a discussion on feature generation from CAD data and reduction to a low-dimensional representation of the customer requests.},
  keywords  = {applied-ml},
  isbn      = {978-989-758-382-7},
  confrank  = {CORE B}
}

% ---- 2018 ----

@inproceedings{lemmen2018seamless,
  author    = {Lemmen, Markus and Schmidt, Marco and Frochte, J{\"o}rg},
  title     = {{Seamless Integration of Machine Learning Contents in Mechatronics Curricula}},
  booktitle = {19th International Conference on Research and Education in Mechatronics (REM 2018)},
  address   = {Delft},
  year      = {2018},
  doi       = {10.1109/REM.2018.8421794},
  url       = {download/jfrochteREM2018.pdf},
  abstract  = {The topic of machine learning is becoming more and more important for mechatronical systems and will become ordinary part of today's student life. This paper provides an analysis of how machine learning can be integrated as a mandatory part of the curriculum of mechatronic degree courses. We consider, what the required minimal changes in fundamental courses should be and how traditional subjects like robotics, automation and automotive engineering can profit most of this approach.},
  keywords  = {teaching},
  isbn      = {978-1-5386-5413-2},
  confrank  = {specialized}
}

% ---- 2016 ----

@inproceedings{bernst2016prediction,
  author    = {Bernst, Irina and Frochte, J{\"o}rg},
  title     = {{Success Prediction System for Student Counseling Using Data Mining}},
  booktitle = {8th International Conference on Knowledge Discovery and Information Retrieval (KDIR 2016)},
  pages     = {181--189},
  address   = {Porto},
  year      = {2016},
  doi       = {10.5220/0006036401810188},
  url       = {download/DataMiningStudentDataFrochte.pdf},
  abstract  = {A framework how to use data mining of central exam data for the prediction of student success in bachelor degree courses is presented. For the prediction a supervised learning approach is used based on successful and unsuccessful student biographies. We develop a traffic light rating system and present results for two different kinds of bachelor degree courses; one in economics and one in engineering.},
  keywords  = {edm},
  isbn      = {978-989-758-203-5},
  confrank  = {CORE B},
  internal-note = {NOTE(review): pages 181--189 disagree with the SciTePress DOI suffix 01810188 (which suggests 181--188); verify against the published paper}
}

@inproceedings{kaufmann2016fmu,
  author    = {Kaufmann, Christof and Frochte, J{\"o}rg},
  title     = {{A Case Study on FMU as Co-Simulation Exchange Format for FEM Models}},
  booktitle = {13th International Conference on Applied Computing 2016},
  pages     = {11--18},
  address   = {Mannheim},
  year      = {2016},
  url       = {download/FMUforFEM.pdf},
  abstract  = {This work deals with the Functional Mock-Up Interface, which is becoming more and more popular in simulation tools since 2011. Developed with a focus on ordinary differential equation or differential algebraic equation based models arising from Modelica models we discuss the extension on Partial Differential Equation based models discretized using the finite element method (FEM).},
  keywords  = {simulation},
  isbn      = {978-989-8533-56-2}
}

@article{bernst2016assistance,
  author    = {Bernst, Irina and Kaufmann, Christof and Frochte, J{\"o}rg},
  title     = {{On Learning Assistance Systems for Numerical Simulation}},
  journal   = {International Journal on Computer Science and Information Systems},
  volume    = {11},
  number    = {1},
  pages     = {115--133},
  year      = {2016},
  issn      = {1646-3692},
  url       = {download/FrochteOnLearningAssistanceSystems.pdf},
  abstract  = {The work we present deals with the problem to provide learning assistance systems in the context of simulation and modelling. We develop a classification scheme for learning assistance systems and their use cases. Beyond this, we discuss how learning from simulation data differs from traditional knowledge discovery from data bases.},
  keywords  = {simulation, applied-ml}
}

% ---- 2015 ----

@inproceedings{bouillon2015plant,
  author    = {Bouillon, Patrick and Lemmen, Markus and Frochte, J{\"o}rg},
  title     = {{Influence of Plant Model Variants for the Automatic Optimisation of Control Parameters}},
  booktitle = {16th International Conference on Research and Education in Mechatronics (REM 2015)},
  pages     = {80--87},
  address   = {Bochum},
  year      = {2015},
  isbn      = {978-3-945728-01-7},
  doi       = {10.1109/REM.2015.7380376},
  url       = {download/FrochtePlantModellControllerREM2015.pdf},
  abstract  = {Designing controllers in a model based engineering environment has become more important in industrial applications. The goal of this paper is to investigate the automation potentials of the model based design process, concretely the parametrization of two given cascaded controllers for a plant, which might vary over time or due to certain events.},
  keywords  = {applied-ml, simulation},
  confrank  = {specialized}
}

@inproceedings{bouillon2015elearning,
  author    = {Bouillon, Patrick and Frochte, J{\"o}rg},
  title     = {{Simulation- and Web-Based E-Learning in Engineering -- Open Source Architecture and Didactic Issues}},
  booktitle = {16th International Conference on Research and Education in Mechatronics (REM 2015)},
  pages     = {127--134},
  address   = {Bochum},
  year      = {2015},
  isbn      = {978-3-945728-01-7},
  doi       = {10.1109/REM.2015.7380382},
  url       = {download/BouillonFrochteELearningModelica.pdf},
  abstract  = {The relevance of simulation based software has increased for the academic education of engineers. In this article we consider didactic issues and an open source software architecture to address these issues. A software architecture that combines Java Remote Application Platform, OpenModelica and a database connection is presented to support differentiated instruction.},
  keywords  = {teaching, simulation},
  confrank  = {specialized}
}

% NOTE(review): the doi below is identical to the one on bernst2014loadbalancing
% (10.13140/2.1.1780.8328); one of the two entries is presumably wrong -- verify
% against the ResearchGate records before publishing this file.
@inproceedings{bernst2015loadbalancing,
  author    = {Bernst, Irina and Kaufmann, Christof and Frochte, J{\"o}rg},
  title     = {{Learning Load Balancing for Simulation in Heterogeneous Systems}},
  booktitle = {12th International Conference on Applied Computing 2015},
  pages     = {121--128},
  address   = {Greater Dublin},
  year      = {2015},
  isbn      = {978-989-8533-45-6},
  doi       = {10.13140/2.1.1780.8328},
  url       = {download/ACFullPaperBKF2015_preprint.pdf},
  abstract  = {Distributed computing is an important key technology for simulation technologies like finite elements. The work we present deals with the problem to provide a learning assistance system for load balancing in FEM simulations. Our method uses a two-stage architecture to minimize additional computational costs.},
  keywords  = {simulation, applied-ml}
}

% ---- 2014 ----

@inproceedings{kaufmann2014cloud,
  author    = {Kaufmann, Christof and Bouillon, Patrick and Frochte, J{\"o}rg},
  title     = {{An Approach for Secure Cloud Computing for FEM Simulation}},
  booktitle = {11th International Conference on Applied Computing 2014},
  pages     = {234--239},
  year      = {2014},
  isbn      = {978-989-8533-25-8},
  doi       = {10.13140/2.1.3484.7687},
  url       = {download/jfrochteAppliedComputing2014a.pdf},
  abstract  = {This paper deals with the challenge to make cloud computing an acceptable way for finite element method (FEM) simulations. The approach we propose makes use of the specific properties of the FEM and with these it enables the user to simulate in the cloud without risking sensitive information.},
  keywords  = {simulation}
}

@inproceedings{bernst2014loadbalancing,
  author    = {Bernst, Irina and Bouillon, Patrick and Kaufmann, Christof and Frochte, J{\"o}rg},
  title     = {{An Approach for Load Balancing for Simulation in Heterogeneous Distributed Systems using Simulation Data Mining}},
  booktitle = {11th International Conference on Applied Computing 2014},
  pages     = {254--259},
  year      = {2014},
  isbn      = {978-989-8533-25-8},
  doi       = {10.13140/2.1.1780.8328},
  url       = {download/jfrochteAppliedComputing2014b.pdf},
  abstract  = {This paper describes an approach to reduce the computation time of finite element simulations on heterogeneous distributed systems. This should be achieved by enhanced load balancing with help of machine learning techniques.},
  keywords  = {simulation, applied-ml}
}

% ---- 2013 ----

% NOTE(review): PAKDD 2013 is a conference; entry type changed from @incollection
% to @inproceedings and the LNAI publisher (Springer) added.
@inproceedings{burrows2013overlap,
  author    = {Burrows, Steven and Stein, Benno and V{\"o}lske, Michael and Mart{\'\i}nez Torres, Ana Bel{\'e}n and Frochte, J{\"o}rg},
  title     = {{Learning Overlap Optimization for Domain Decomposition Methods}},
  booktitle = {17th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD 2013)},
  series    = {LNAI},
  volume    = {7818},
  pages     = {438--449},
  address   = {Gold Coast},
  publisher = {Springer},
  year      = {2013},
  doi       = {10.1007/978-3-642-37453-1_36},
  url       = {download/jfrochte2013a.pdf},
  abstract  = {The finite element method is a numerical simulation technique for solving partial differential equations. Domain decomposition provides a means for parallelizing the expensive simulation with modern computing architecture. Choosing the sub-domains for domain decomposition is a non-trivial task, and in this paper we show how this can be addressed with machine learning. Our method starts with a baseline decomposition, from which we learn tailored sub-domain overlaps from localized neighborhoods.},
  keywords  = {simulation, applied-ml},
  isbn      = {978-3-642-37453-1},
  confrank  = {CORE A}
}

% ---- 2011 ----

% NOTE(review): "M{\"u}ller, K." is the only abbreviated given name in this file;
% store the full first name if it can be recovered from the paper.
@inproceedings{burrows2011bridge,
  author    = {Burrows, Steven and Stein, Benno and Wiesner, David and M{\"u}ller, K. and Frochte, J{\"o}rg},
  title     = {{Simulation Data Mining for Supporting Bridge Design}},
  booktitle = {Australasian Data Mining Conference (AusDM 2011)},
  pages     = {71--79},
  address   = {Ballarat},
  year      = {2011},
  isbn      = {978-1-921770-02-9},
  url       = {download/AusDM2011.pdf},
  abstract  = {We introduce simulation data mining as an approach to extract knowledge and decision rules from simulation results. This paper reports on a bridge design project in civil engineering where the motivation to apply simulation data mining is twofold.},
  keywords  = {simulation, applied-ml},
  confrank  = {CORE B}
}

@inproceedings{frochte2011dae,
  author    = {Frochte, J{\"o}rg},
  title     = {{Evaluation and Adaptation of Techniques for Higher Index DAE with Respect to Real-Time Simulation}},
  booktitle = {ASIM 2011 Proceedings},
  address   = {Winterthur},
  year      = {2011},
  isbn      = {978-3-89967-733-1},
  url       = {download/jfrochteASIM2011.pdf},
  abstract  = {In this paper we will evaluate approaches to the simulation of DAE of higher order under real-time conditions. The Hardware in the loop (HIL) Simulation requires plant models which can be simulated under hard real-time conditions.},
  keywords  = {simulation}
}

@inproceedings{frochte2011modelica,
  author    = {Frochte, J{\"o}rg},
  title     = {{Modelica Simulator Compatibility -- Today and in Future}},
  booktitle = {8th International Modelica Conference},
  series    = {Link{\"o}ping Electronic Conference Proceedings},
  number    = {63},
  pages     = {812--818},
  address   = {Dresden},
  year      = {2011},
  isbn      = {978-91-7393-096-3},
  doi       = {10.3384/ecp11063812},
  url       = {download/jfrochteModelica2011.pdf},
  abstract  = {In this paper we would like to give a small snapshot in time on Modelica tool compatibility today, and discuss strategies for its improvement in order to keep it on a high level.},
  keywords  = {simulation}
}

% ---- 2010 ----

% NOTE(review): no pages given -- AIP Conference Proceedings articles have page
% numbers; add them if known.
@inproceedings{frochte2010nonlinear,
  author    = {Frochte, J{\"o}rg},
  title     = {{A Numerical Method for a Nonlinear Spatial Population Model with a Continuous Delay}},
  booktitle = {International Conference of Numerical Analysis and Applied Mathematics 2010},
  series    = {AIP Conference Proceedings},
  volume    = {1281},
  year      = {2010},
  isbn      = {978-0-7354-0834-0},
  url       = {download/ICNAAM2010frochte.pdf}
}

% ---- 2009 ----

% NOTE(review): @article without volume/number/pages is incomplete and will
% trigger BibTeX warnings -- complete the journal data for this entry.
@article{heinrichs2009splitting,
  author    = {Heinrichs, Wilhelm and Frochte, J{\"o}rg},
  title     = {{A Splitting Technique of Higher Order for the Navier-Stokes Equations}},
  journal   = {Journal of Computational and Applied Mathematics},
  year      = {2009},
  url       = {download/frochteheinrichs.pdf}
}

% ---- 2008 ----

@inproceedings{frochte2008adaptive,
  author    = {Frochte, J{\"o}rg},
  title     = {{An Adaptive Higher Order Method in Time for Partial Integro-Differential Equations}},
  booktitle = {International Conference on Numerical Analysis and Applied Mathematics 2008},
  series    = {AIP Conference Proceedings},
  volume    = {1048},
  year      = {2008},
  isbn      = {978-0-7354-0576-9},
  url       = {download/ICNAAM2008frochte.pdf}
}

% ---- 2007 ----

% NOTE(review): original isbn "978-3-540-697764" fails the ISBN-13 checksum
% (check digit for 978-3-540-69776 is 3, not 4); corrected to the Springer
% print ISBN of the ENUMATH 2007 proceedings -- verify against the book.
@incollection{frochte2007convdiff,
  author    = {Frochte, J{\"o}rg},
  title     = {{A Third Order Method for Convection-Diffusion Equations with a Delay Term}},
  booktitle = {ENUMATH 2007, Numerical Mathematics and Advanced Applications},
  publisher = {Springer},
  year      = {2008},
  isbn      = {978-3-540-69776-3},
  url       = {download/enumath2007frochte.pdf}
}

% ---- 2006 ----

% NOTE(review): booktitle is terse -- presumably the 7th World Congress on
% Computational Mechanics (Los Angeles, 2006); expand and verify.
@inproceedings{frochte2006hybrid,
  author    = {Frochte, J{\"o}rg},
  title     = {{A Hybrid LSFEM/FEM Technique for Time-Dependent Convection Dominated Equations}},
  booktitle = {WCCM 2006},
  address   = {Los Angeles},
  year      = {2006},
  url       = {download/wccm2006.pdf}
}

% ---- 2005 ----

@incollection{heinrichs2005splitting,
  author    = {Heinrichs, Wilhelm and Frochte, J{\"o}rg},
  title     = {{An Adaptive Operator Splitting of Higher Order for the Navier-Stokes Equations}},
  booktitle = {ENUMATH 2005, Numerical Mathematics and Advanced Applications},
  publisher = {Springer},
  pages     = {871--879},
  year      = {2006},
  isbn      = {3-540-34287-7},
  url       = {download/nse-spitting05.pdf}
}


% ============================================================
% Science Communication
% ============================================================

% NOTE(review): @misc has no "journal" field in standard styles (it is silently
% ignored); the magazine name belongs in "howpublished".
@misc{frochte2023scicom_stadtwerke,
  author       = {Frochte, J{\"o}rg},
  title        = {{KI startet durch}},
  howpublished = {SciCom -- Customer magazine of Stadtwerke Bochum},
  year         = {2023},
  note         = {September 2023},
  url          = {https://www.stadtwerke-bochum.de/privatkunden/magazin/kuenstliche-intelligenz},
  keywords     = {scicom}
}

% NOTE(review): @misc has no "journal" field in standard styles (it is silently
% ignored); the magazine name belongs in "howpublished".
@misc{frochte2023scicom_ihk,
  author       = {Frochte, J{\"o}rg},
  title        = {{KI als Chance: Pro \& Contra}},
  howpublished = {IHK Magazin},
  year         = {2023},
  note         = {2023},
  url          = {https://www.ihkmagazin.de/ki-als-chance-pro-contra/},
  keywords     = {scicom}
}

% NOTE(review): the url contains literal "%" characters; with classic BibTeX and
% a non-url-aware style these act as LaTeX comment characters in the .bbl and
% truncate the line -- escape as \% or switch to a url-aware style/biblatex.
@misc{sandfuchs2025scicom_localllms,
  author    = {Sandfuchs, Stephan and Farooghi, Diako and Frochte, J{\"o}rg},
  title     = {{Factsheet: Lokale und selbstgehostete LLMs}},
  year      = {2025},
  note      = {TRAIBER.NRW Factsheet},
  url       = {https://traiber.nrw/sites/default/files/downloads/2025-12/TRAIBER.NRW%20Factsheet_Local%20LLMs_0.pdf},
  keywords  = {scicom, llm}
}

% NOTE(review): @misc has no "journal" field in standard styles (it is silently
% ignored); the magazine name belongs in "howpublished".
@misc{frochte2022scicom_linux,
  author       = {Frochte, J{\"o}rg},
  title        = {{Technische Grundlagen}},
  howpublished = {Linux-Magazin},
  year         = {2022},
  note         = {September 2022},
  url          = {https://www.linux-magazin.de/ausgaben/2022/09/grundlagen/},
  keywords     = {scicom}
}

% ============================================================
% Books
% ============================================================

@book{frochte2020ml3,
  author    = {Frochte, J{\"o}rg},
  title     = {{Maschinelles Lernen: Grundlagen und Algorithmen in Python}},
  edition   = {3},
  publisher = {Carl Hanser Verlag},
  year      = {2020},
  isbn      = {978-3446461444},
  pages     = {616}
}

@book{frochte2019ml2,
  author    = {Frochte, J{\"o}rg},
  title     = {{Maschinelles Lernen: Grundlagen und Algorithmen in Python}},
  edition   = {2},
  publisher = {Carl Hanser Verlag},
  year      = {2019},
  isbn      = {978-3446459960},
  pages     = {406}
}

@book{frochte2018ml1,
  author    = {Frochte, J{\"o}rg},
  title     = {{Maschinelles Lernen: Grundlagen und Algorithmen in Python}},
  edition   = {1},
  publisher = {Carl Hanser Verlag},
  year      = {2018},
  isbn      = {978-3446452916},
  pages     = {406}
}

@book{frochte2016fem,
  author    = {Frochte, J{\"o}rg},
  title     = {{Finite-Elemente-Methode: Eine praxisbezogene Einf{\"u}hrung mit GNU Octave/MATLAB}},
  edition   = {1},
  publisher = {Carl Hanser Verlag},
  year      = {2016},
  isbn      = {978-3446446656},
  pages     = {320}
}
