@article{4050, author = {Migenda, Nico and Möller, Ralf and Schenck, Wolfram}, issn = {00313203}, journal = {Pattern Recognition}, publisher = {Elsevier BV}, title = {{Adaptive local Principal Component Analysis improves the clustering of high-dimensional data}}, doi = {10.1016/j.patcog.2023.110030}, volume = {146}, year = {2024}, } @inproceedings{4293, author = {Schwan, Constanze and Schenck, Wolfram}, booktitle = {2023 International Joint Conference on Neural Networks (IJCNN)}, location = {Gold Coast, Australia}, pages = {1--8}, publisher = {IEEE}, title = {{Object View Prediction with Aleatoric Uncertainty for Robotic Grasping}}, doi = {10.1109/IJCNN54540.2023.10191465}, year = {2023}, } @article{2774, abstract = { Despite the availability and ease of collecting a large amount of free, unlabeled data, the expensive and time-consuming labeling process is still an obstacle to labeling a sufficient amount of training data, which is essential for building supervised learning models. Here, with low labeling cost, the active learning (AL) technique could be a solution, whereby a few high-quality data points are queried by searching for the most informative and representative points within the instance space. This strategy ensures high generalizability across the space and improves classification performance on data we have never seen before. In this paper, we provide a survey of recent studies on active learning in the context of classification. This survey starts with an introduction to the theoretical background of the AL technique, AL scenarios, AL components supported with visual explanations, and illustrative examples to explain how AL works and the benefits of using AL. In addition to an overview of the query strategies for the classification scenarios, this survey provides a high-level summary of various practical challenges with AL in real-world settings; it also explains how AL can be combined with various research areas. Finally, the most commonly used AL software packages and experimental evaluation metrics with AL are also discussed. }, author = {Tharwat, Alaa and Schenck, Wolfram}, issn = {2227-7390}, journal = {Mathematics}, number = {4}, publisher = {MDPI AG}, title = {{A Survey on Active Learning: State-of-the-Art, Practical Challenges and Research Directions}}, doi = {10.3390/math11040820}, volume = {11}, year = {2023}, } @article{3453, abstract = { For assistive devices such as active orthoses, exoskeletons or other close-to-body robotic systems, the immediate prediction of biological limb movements based on biosignals in the respective control system can be used to enable intuitive operation also by untrained users, e.g., in healthcare, rehabilitation or industrial scenarios. Surface electromyography (sEMG) signals from the muscles that drive the limbs can be measured before the actual movement occurs and, hence, can be used as a source for predicting limb movements. The aim of this work was to create a model that can be adapted to a new user or movement scenario with little measurement and computing effort. Therefore, a biomechanical model is presented that predicts limb movements of the human forearm based on easy-to-measure sEMG signals of the main muscles involved in forearm actuation (lateral and long head of triceps and short and long head of biceps). 
The model has 42 internal parameters, of which 37 were attributed to 8 individually measured physiological measures (location of acromion at the shoulder, medial/lateral epicondyles as well as olecranon at the elbow, and styloid processes of radius/ulna at the wrist; maximum muscle forces of biceps and triceps). The remaining 5 parameters are adapted to specific movement conditions in an optimization process. The model was tested in an experimental study with 31 subjects in which the prediction quality of the model was assessed. The quality of the movement prediction was evaluated by using the normalized mean absolute error (nMAE) for two arm postures (lower, upper), two load conditions (2 kg, 4 kg) and two movement velocities (slow, fast). For the resulting 8 experimental combinations, the nMAE varied between nMAE = 0.16 and nMAE = 0.21 (lower numbers better). An additional quality score (QS) was introduced that allows direct comparison between different movements. This score ranged from QS = 0.25 to QS = 0.40 (higher numbers better) for the experimental combinations. The aim formulated above was achieved with good prediction quality by using only 8 individual measurements (easy-to-collect body dimensions) and the subsequent optimization of only 5 parameters. At the same time, only easily accessible sEMG measurement locations are used to enable simple integration, e.g., in exoskeletons. This biomechanical model does not compete with models that measure all sEMG signals of the muscle heads involved in order to achieve the highest possible prediction quality. }, author = {Grimmelsmann, Nils and Mechtenberg, Malte and Schenck, Wolfram and Meyer, Hanno Gerd and Schneider, Axel}, issn = {1932-6203}, journal = {PLOS ONE}, number = {8}, publisher = {Public Library of Science (PLoS)}, title = {{sEMG-based prediction of human forearm movements utilizing a biomechanical model based on individual anatomical/physiological measures and a reduced set of optimization parameters}}, doi = {10.1371/journal.pone.0289549}, volume = {18}, year = {2023}, } @article{1799, abstract = { Humans learn movements naturally, but it takes a lot of time and training to achieve expert performance in motor skills. In this review, we show how modern technologies can support people in learning new motor skills. First, we introduce important concepts in motor control, motor learning and motor skill learning. We also give an overview of the rapid expansion of machine learning algorithms and sensor technologies for human motion analysis. The integration of motor learning principles, machine learning algorithms and recent sensor technologies has the potential to enable the development of AI-guided assistance systems for motor skill training. We give our perspective on this integration of different fields to transition from motor learning research in laboratory settings to real-world environments and real-world motor tasks and propose a stepwise approach to facilitate this transition. 
}, author = {Vandevoorde, Koenraad and Vollenkemper, Lukas and Schwan, Constanze and Kohlhase, Martin and Schenck, Wolfram}, issn = {1424-8220}, journal = {Sensors}, keywords = {motor learning, motor skill learning, assistance system, artificial intelligence, machine learning, pose estimation, action recognition, human motion analysis}, number = {7}, publisher = {MDPI AG}, title = {{Using Artificial Intelligence for Assistance Systems to Bring Motor Learning Principles into Real World Motor Tasks}}, doi = {10.3390/s22072481}, volume = {22}, year = {2022}, } @inproceedings{2945, author = {Shah, Zafran Hussain and Müller, Marcel and Hammer, Barbara and Huser, Thomas and Schenck, Wolfram}, booktitle = {2022 International Joint Conference on Neural Networks (IJCNN)}, location = {Padua, Italy}, pages = {1--10}, publisher = {IEEE}, title = {{Impact of different loss functions on denoising of microscopic images}}, doi = {10.1109/IJCNN55064.2022.9892936}, year = {2022}, } @article{2944, abstract = { Many application scenarios for image recognition require learning of deep networks from small sample sizes in the order of a few hundred samples per class. Then, avoiding overfitting is critical. Common techniques to address overfitting are transfer learning, reduction of model complexity and artificial enrichment of the available data by, e.g., data augmentation. A key idea proposed in this paper is to incorporate additional samples into the training that do not belong to the classes of the target task. This can be accomplished by formulating the original classification task as an open set classification task. While the original closed set classification task is not altered at inference time, recasting it as an open set classification task enables the inclusion of additional data during training. Hence, the original closed set classification task is augmented with an open set task during training. We therefore call the proposed approach open set task augmentation. In order to integrate additional task-unrelated samples into the training, we employ the entropic open set loss originally proposed for open set classification tasks and also show that similar results can be obtained with a modified sum of squared errors loss function. Learning with the proposed approach benefits from the integration of additional “unknown” samples, which are often available, e.g., from open data sets, and can then be easily incorporated into the learning process. We show that this open set task augmentation can improve model performance even when these additional samples are rather few or far from the domain of the target task. The proposed approach is demonstrated on two exemplary scenarios based on subsets of the ImageNet and Food-101 data sets as well as with several network architectures and two loss functions. We further shed light on the impact of the entropic open set loss on the internal representations formed by the networks. Open set task augmentation is particularly valuable when no additional data from the target classes are available—a scenario often faced in practice. 
}, author = {Zai El Amri, Wadhah and Reinhart, Felix and Schenck, Wolfram}, issn = {1433-3058}, journal = {Neural Computing and Applications}, number = {8}, pages = {6067--6083}, publisher = {Springer Science and Business Media LLC}, title = {{Open set task augmentation facilitates generalization of deep neural networks trained on small data sets}}, doi = {10.1007/s00521-021-06753-6}, volume = {34}, year = {2022}, } @inproceedings{2776, abstract = { State-of-the-art methods in image-based robotic grasping use deep convolutional neural networks to determine the robot parameters that maximize the probability of a stable grasp given an image of an object. Despite the high accuracy of these models, they are not applied in industrial order picking tasks to date. One of the reasons is the fact that the generation of the training data for these models is expensive. Even though this could be solved by using a physics simulation for training data generation, another even more important reason is that the features that lead to the prediction made by the model are not human-readable. This lack of interpretability is the crucial factor why deep networks are not found in critical industrial applications. In this study, we suggest reformulating the task of robotic grasping as three tasks that are easy to assess from human experience. For each of the three steps we discuss the accuracy and interpretability. We outline how the proposed three-step model can be extended to depth images. Furthermore, we discuss how interpretable machine learning models can be chosen for the three steps in order to be applied in a real-world industrial environment. }, author = {Schwan, Constanze and Schenck, Wolfram}, booktitle = {Kommunikation und Bildverarbeitung in der Automation. Ausgewählte Beiträge der Jahreskolloquien KommA und BVAu 2020}, editor = {Jasperneite, Jürgen and Lohweg, Volker}, isbn = {978-3-662-64282-5}, issn = {2522-8587}, pages = {291--303}, publisher = {Springer Berlin Heidelberg}, title = {{Design of Interpretable Machine Learning Tasks for the Application to Industrial Order Picking}}, doi = {10.1007/978-3-662-64283-2_21}, year = {2022}, } @article{2775, abstract = { Despite the availability of a large amount of free unlabeled data, collecting sufficient training data for supervised learning models is challenging due to the time and cost involved in the labeling process. The active learning technique we present here provides a solution by querying a small but highly informative set of unlabeled data. It ensures high generalizability across the instance space, improving classification performance on test data that we have never seen before. Most active learners query either the most informative or the most representative data points to annotate them. These two criteria are combined in the proposed algorithm by using two phases: an exploration phase and an exploitation phase. The former aims to explore the instance space by visiting new regions at each iteration. The latter attempts to select highly informative points in uncertain regions. Without any predefined knowledge, such as initial training data, these two phases improve the search strategy of the proposed algorithm so that it can explore the minority class space with imbalanced data using a small query budget. Further, some pseudo-labeled points geometrically located in trusted explored regions around the new labeled points are added to the training data, but with lower weights than the original labeled points. 
These pseudo-labeled points play several roles in our model, such as (i) increasing the size of the training data and (ii) decreasing the size of the version space by reducing the number of hypotheses that are consistent with the training data. Experiments on synthetic and real datasets with different imbalance ratios and dimensions show that the proposed algorithm has significant advantages over various well-known active learners. }, author = {Tharwat, Alaa and Schenck, Wolfram}, issn = {2227-7390}, journal = {Mathematics}, number = {7}, publisher = {MDPI AG}, title = {{A Novel Low-Query-Budget Active Learner with Pseudo-Labels for Imbalanced Data}}, doi = {10.3390/math10071068}, volume = {10}, year = {2022}, } @inproceedings{2569, abstract = {Legal systems form the foundation of democratic states. Nevertheless, it is nearly impossible for individuals to extract specific information from comprehensive legal documents. We present a human-centered and AI-supported system for semantic question answering (QA) in the German legal domain. Our system is built on top of human collaboration and natural language processing (NLP)-based legal information retrieval. Laypersons and legal professionals receive information supporting their research and decision-making by collaborating with the system and its underlying AI methods to enable a smarter society. The internal AI is based on state-of-the-art methods evaluating complex search terms, considering words and phrases specific to German law. Subsequently, relevant documents or answers are ranked and graphically presented to the human. In addition to the novel system, we publish the first annotated data set for QA in the German legal domain. The experimental results indicate that our semantic QA workflow outperforms existing approaches.}, author = {Hoppe, Christoph and Migenda, Nico and Pelkmann, David and Hötte, Daniel Antonius and Schenck, Wolfram}, booktitle = {Collaborative Networks in Digitalization and Society 5.0}, editor = {Camarinha-Matos, Luis M. and Ortiz, Angel and Boucher, Xavier and Osório, A. Luís},
keywords = {question answering, information retrieval, human-AI interface design, AI-supported decision making, legal research}, isbn = {978-3-031-14843-9}, issn = {1868-422X}, location = {Lisbon, Portugal}, pages = {303--312}, publisher = {Springer International Publishing}, title = {{Collaborative System for Question Answering in German Case Law Documents}}, doi = {10.1007/978-3-031-14844-6_24}, year = {2022}, } @article{1201, author = {Shah, Zafran Hussain and Müller, Marcel and Wang, Tung-Cheng and Scheidig, Philip Maurice and Schneider, Axel and Schüttpelz, Mark and Huser, Thomas and Schenck, Wolfram}, issn = {2327-9125}, journal = {Photonics Research}, number = {5}, publisher = {The Optical Society}, title = {{Deep-learning based denoising and reconstruction of super-resolution structured illumination microscopy images}}, doi = {10.1364/PRJ.416437}, volume = {9}, year = {2021}, } @inproceedings{2570, author = {Hoppe, Christoph and Pelkmann, David and Migenda, Nico and Hötte, Daniel Antonius and Schenck, Wolfram}, booktitle = {2021 IEEE Fourth International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)}, location = {Laguna Hills, CA, USA}, pages = {29--32}, publisher = {IEEE}, title = {{Towards Intelligent Legal Advisors for Document Retrieval and Question-Answering in German Legal Documents}}, doi = {10.1109/AIKE52691.2021.00011}, year = {2021}, } @inproceedings{2571, author = {Voigt, Tim and Migenda, Nico and Schöne, Marvin and Pelkmann, David and Fricke, Matthias and Schenck, Wolfram and Kohlhase, Martin}, booktitle = {2021 26th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)}, location = {Västerås, Sweden}, pages = {01--08}, publisher = {IEEE}, title = {{Advanced Data Analytics Platform for Manufacturing Companies}}, doi = {10.1109/ETFA45728.2021.9613499}, year = {2021}, } @inproceedings{2572, author = {Steinmann, Luca and Migenda, Nico and Voigt, Tim and Kohlhase, Martin and Schenck, Wolfram}, booktitle = {2021 3rd International Conference on Management Science and Industrial Engineering}, isbn = {9781450388887}, location = {Osaka, Japan}, pages = {1--7}, publisher = {ACM}, title = {{Variational Autoencoder based Novelty Detection for Real-World Time Series}}, doi = {10.1145/3460824.3460825}, year = {2021}, } @article{1203, abstract = { “Principal Component Analysis” (PCA) is an established linear technique for dimensionality reduction. It performs an orthonormal transformation to replace possibly correlated variables with a smaller set of linearly independent variables, the so-called principal components, which capture a large portion of the data variance. The problem of finding the optimal number of principal components has been widely studied for offline PCA. However, when working with streaming data, the optimal number changes continuously. This requires updating both the principal components and the dimensionality in every timestep. While the continuous update of the principal components is widely studied, the available algorithms for dimensionality adjustment are limited to an increment of one in neural network-based and incremental PCA. Therefore, existing approaches cannot account for abrupt changes in the presented data. The contribution of this work is to enable continuous dimensionality adjustment by an arbitrary number in neural network-based PCA, without the necessity to learn all principal components. A novel algorithm is presented that utilizes several PCA characteristics to adaptively update the optimal number of principal components for neural network-based PCA. 
A precise estimation of the required dimensionality reduces the computational effort while ensuring that the desired amount of variance is kept. The computational complexity of the proposed algorithm is investigated, and it is benchmarked in an experimental study against other neural network-based and incremental PCA approaches, where it produces highly competitive results. }, author = {Migenda, Nico and Möller, Ralf and Schenck, Wolfram}, issn = {1932-6203}, journal = {PLOS ONE}, number = {3}, publisher = {Public Library of Science (PLoS)}, title = {{Adaptive dimensionality reduction for neural network-based online principal component analysis}}, doi = {10.1371/journal.pone.0248896}, volume = {16}, year = {2021}, } @article{2777, author = {Tharwat, Alaa and Schenck, Wolfram}, issn = {22106502}, journal = {Swarm and Evolutionary Computation}, publisher = {Elsevier BV}, title = {{Population initialization techniques for evolutionary algorithms for single-objective constrained optimization problems: Deterministic vs. stochastic techniques}}, doi = {10.1016/j.swevo.2021.100952}, volume = {67}, year = {2021}, } @article{1202, author = {Tharwat, Alaa and Schenck, Wolfram}, issn = {09574174}, journal = {Expert Systems with Applications}, publisher = {Elsevier BV}, title = {{A conceptual and practical comparison of PSO-style optimization algorithms}}, doi = {10.1016/j.eswa.2020.114430}, volume = {167}, year = {2021}, } @techreport{2778, abstract = { Super-resolution structured illumination microscopy (SR-SIM) provides an up to two-fold enhanced spatial resolution of fluorescently labeled samples. The reconstruction of high-quality SR-SIM images critically depends on patterned illumination with high modulation contrast. Noisy raw image data, e.g., as a result of low excitation power or low exposure times, result in reconstruction artifacts. Here, we demonstrate deep-learning based SR-SIM image denoising that results in high-quality reconstructed images. A residual encoding-decoding convolutional neural network (RED-Net) was used to successfully denoise computationally reconstructed noisy SR-SIM images. We also demonstrate the entirely deep-learning based denoising and reconstruction of raw SIM images into high-resolution SR-SIM images. Both image reconstruction methods prove to be very robust against image reconstruction artifacts and generalize very well over various noise levels. The combination of computational reconstruction and subsequent denoising via RED-Net shows very robust performance during inference after training even if the microscope settings change. 
}, author = {Shah, Zafran Hussain and Müller, Marcel and Wang, Tung-Cheng and Scheidig, Philip Maurice and Schneider, Axel and Schüttpelz, Mark and Huser, Thomas and Schenck, Wolfram}, publisher = {Cold Spring Harbor Laboratory}, title = {{Deep-learning based denoising and reconstruction of super-resolution structured illumination microscopy images}}, doi = {10.1101/2020.10.27.352633}, year = {2020}, } @inproceedings{2574, author = {Migenda, Nico and Schenck, Wolfram}, booktitle = {2020 25th IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)}, location = {Vienna, Austria}, pages = {1579--1586}, publisher = {IEEE}, title = {{Adaptive Dimensionality Reduction for Local Principal Component Analysis}}, doi = {10.1109/ETFA46521.2020.9212129}, year = {2020}, } @article{1204, author = {Tharwat, Alaa and Schenck, Wolfram}, issn = {09507051}, journal = {Knowledge-Based Systems}, publisher = {Elsevier BV}, title = {{Balancing Exploration and Exploitation: A novel active learner for imbalanced data}}, doi = {10.1016/j.knosys.2020.106500}, volume = {210}, year = {2020}, }