@inproceedings{Kriha2018Teaching,
  author   = {Kriha, Walter and Jordine, Tobias},
  title    = {On How We Can Teach -- Exploring New Ways in Professional Software Development for Students},
  year     = {2018},
  abstract = {Requirements and approaches for introductory
            courses in software development at universities differ
            considerably. There seems to be little consensus on which
            languages are a good fit, which methodologies lead to the best
            results and especially which goals should be chosen. This paper
            takes a look at current approaches and difficulties at our own
            faculty -- computer science and media at the Stuttgart Media
            University -- and explores a combination of teaching techniques
            which seem to make a difference. The most important change was
            to switch to a project-based approach instead of the usual exercises
            given to students after a lecture. The second one is the flipped
            classroom approach with micro-exams at the beginning of
            lectures. The third one is an emphasis on professional tools to be
            used during the project. We also try to achieve a concept-based
            approach using e.g. modelling techniques to get a better
            understanding of source code control and build. And finally, we
            work as a team of two lecturers which allows us time to reflect on
            how we do things and creates new ideas frequently. None of those
            approaches is without problems as we will show, and we have met
            with some critique in our own faculty. The paper is explorative,
            based mostly on observations and feedback from students, but we
            intend to get some quantitative results as well in later publications.},
  file     = {Kriha2018.pdf},
  url      = {https://kriha.de/dload/se2paper.pdf},
  internal-note = {NOTE(review): required booktitle (proceedings/venue) is missing -- verify the publication venue and add it},
}

@article{Bogner2023RESTAPI,
  author   = {Bogner, Justus and Kotstein, Sebastian and Pfaff, Timo},
  title    = {Do {RESTful} {API} Design Rules Have an Impact on the Understandability of {Web} {APIs}?},
  journal  = {Empirical Software Engineering},
  volume   = {28},
  year     = {2023},
  abstract = {Context
            Web APIs are one of the most used ways to expose application functionality on the Web, and their understandability is important for efficiently using the provided resources. While many API design rules exist, empirical evidence for the effectiveness of most rules is lacking.

            Objective
            We therefore wanted to study 1) the impact of RESTful API design rules on understandability, 2) if rule violations are also perceived as more difficult to understand, and 3) if demographic attributes like REST-related experience have an influence on this.

            Method
            We conducted a controlled Web-based experiment with 105 participants, from both industry and academia and with different levels of experience. Based on a hybrid between a crossover and a between-subjects design, we studied 12 design rules using API snippets in two complementary versions: one that adhered to a rule and one that was a violation of this rule. Participants answered comprehension questions and rated the perceived difficulty.

            Results
            For 11 of the 12 rules, we found that violation performed significantly worse than rule for the comprehension tasks. Regarding the subjective ratings, we found significant differences for 9 of the 12 rules, meaning that most violations were subjectively rated as more difficult to understand. Demographics played no role in the comprehension performance for violation.

            Conclusions
            Our results provide first empirical evidence for the importance of following design rules to improve the understandability of Web APIs, which is important for researchers, practitioners, and educators.},
  doi      = {10.1007/s10664-023-10367-y},
  file     = {Bogner2023.pdf},
  url      = {https://link.springer.com/article/10.1007/s10664-023-10367-y},
}

@article{Ralph2020Pandemic,
  author   = {Ralph, Paul and Baltes, Sebastian and Adisaputri, Gianisa and Torkar, Richard and
          Kovalenko, Vladimir and Kalinowski, Marcos and Novielli, Nicole and Yoo, Shin and
          Devroey, Xavier and Tan, Xin and Zhou, Minghui and Turhan, Burak and Hoda, Rashina and
          Hata, Hideaki and Robles, Gregorio and Milani Fard, Amin and Alkadhi, Rana},
  title    = {Pandemic Programming: How {COVID-19} Affects Software Developers and How Their Organizations Can Help},
  journal  = {Empirical Software Engineering},
  volume   = {25},
  pages    = {4927--4961},
  year     = {2020},
  abstract = {Context
            As a novel coronavirus swept the world in early 2020, thousands of software developers began working from home. Many did so on short notice, under difficult and stressful conditions.

            Objective
            This study investigates the effects of the pandemic on developers' wellbeing and productivity.

            Method
            A questionnaire survey was created mainly from existing, validated scales and translated into 12 languages. The data was analyzed using non-parametric inferential statistics and structural equation modeling.

            Results
            The questionnaire received 2225 usable responses from 53 countries. Factor analysis supported the validity of the scales and the structural model achieved a good fit (CFI = 0.961, RMSEA = 0.051, SRMR = 0.067). Confirmatory results include: (1) the pandemic has had a negative effect on developers' wellbeing and productivity; (2) productivity and wellbeing are closely related; (3) disaster preparedness, fear related to the pandemic and home office ergonomics all affect wellbeing or productivity. Exploratory analysis suggests that: (1) women, parents and people with disabilities may be disproportionately affected; (2) different people need different kinds of support.

            Conclusions
            To improve employee productivity, software companies should focus on maximizing employee wellbeing and improving the ergonomics of employees' home offices. Women, parents and disabled persons may require extra support.},
  doi      = {10.1007/s10664-020-09875-y},
  file     = {Ralph2020.pdf},
  url      = {https://link.springer.com/article/10.1007/s10664-020-09875-y},
}

@article{Fritzsch2022CyberPhysical,
  author   = {Fritzsch, Jonas and Bogner, Justus and Haug, Markus and Franco da Silva, Ana Cristina and Rubner, Carolin and Saft, Matthias and Sauer, Horst and Wagner, Stefan},
  title    = {Adopting Microservices and {DevOps} in the Cyber-Physical Systems Domain: A Rapid Review and Case Study},
  journal  = {Software: Practice and Experience},
  year     = {2022},
  abstract = {The domain of cyber-physical systems (CPS) has recently seen strong growth, for example, due to the rise of the Internet of Things (IoT) in industrial domains, commonly referred to as ``Industry 4.0.'' However, CPS challenges like the strong hardware focus can impact modern software development practices, especially in the context of modernizing legacy systems. While microservices and DevOps have been widely studied for enterprise applications, there is insufficient coverage for the CPS domain. Our goal is therefore to analyze the peculiarities of such systems regarding challenges and practices for using and migrating towards microservices and DevOps. We conducted a rapid review based on 146 scientific papers, and subsequently validated our findings in an interview-based case study with nine CPS professionals in different business units at Siemens AG. The combined results picture the specifics of microservices and DevOps in the CPS domain. While several differences were revealed that may require adapted methods, many challenges and practices are shared with typical enterprise applications. Our study supports CPS researchers and practitioners with a summary of challenges, practices to address them, and research opportunities.},
  doi      = {10.1002/spe.3169},
  file     = {Fritzsch2022.pdf},
  url      = {https://onlinelibrary.wiley.com/doi/10.1002/spe.3169},
}

@article{Bogner2019ServicePatterns,
  author   = {Bogner, Justus and Wagner, Stefan and Zimmermann, Alfred},
  title    = {On the Impact of Service-Oriented Patterns on Software Evolvability: A Controlled Experiment and Metric-Based Analysis},
  journal  = {PeerJ Computer Science},
  volume   = {5},
  pages    = {e213},
  year     = {2019},
  abstract = {
Background
Design patterns are supposed to improve various quality attributes of software systems. However, there is controversial quantitative evidence of this impact. Especially for younger paradigms such as service- and Microservice-based systems, there is a lack of empirical studies.

Objective
In this study, we focused on the effect of four service-based patterns---namely Process Abstraction, Service Fa{\c{c}}ade, Decomposed Capability, and Event-Driven Messaging---on the evolvability of a system from the viewpoint of inexperienced developers.

Method
We conducted a controlled experiment with Bachelor students (N = 69). Two functionally equivalent versions of a service-based web shop---one with patterns (treatment group), one without (control group)---had to be changed and extended in three tasks. We measured evolvability by the effectiveness and efficiency of the participants in these tasks. Additionally, we compared both system versions with nine structural maintainability metrics for size, granularity, complexity, cohesion, and coupling.

Results
Both experiment groups were able to complete a similar number of tasks within the allowed 90 min. Median effectiveness was 1/3. Mean efficiency was 12\% higher in the treatment group, but this difference was not statistically significant. Only for the third task, we found statistical support for accepting the alternative hypothesis that the pattern version led to higher efficiency. In the metric analysis, the pattern version had worse measurements for size and granularity while simultaneously having slightly better values for coupling metrics. Complexity and cohesion were not impacted.

Interpretation
For the experiment, our analysis suggests that the difference in efficiency is stronger with more experienced participants and increased from task to task. With respect to the metrics, the patterns introduce additional volume in the system, but also seem to decrease coupling in some areas.

Conclusions
Overall, there was no clear evidence for a decisive positive effect of using service-based patterns, neither for the student experiment nor for the metric analysis. This effect might only be visible in an experiment setting with higher initial effort to understand the system or with more experienced developers.},
  doi      = {10.7717/peerj-cs.213},
  file     = {Bogner2019.pdf},
  url      = {https://peerj.com/articles/cs-213/},
}

@inproceedings{Keim2024TraceLinks,
  author    = {Keim, Jan and Corallo, Sophie and Fuch{\ss}, Dominik and Hey, Tobias and Telge, Tobias and Koziolek, Anne},
  title     = {Recovering Trace Links Between Software Documentation and Code},
  booktitle = {Proceedings of the 46th {IEEE/ACM} International Conference on Software Engineering ({ICSE} '24)},
  year      = {2024},
  abstract  = {Introduction Software development involves creating various artifacts at different levels of abstraction and establishing relationships between them is essential. Traceability link recovery (TLR) automates this process, enhancing software quality by aiding tasks like maintenance and evolution. However, automating TLR is challenging due to semantic gaps resulting from different levels of abstraction. While automated TLR approaches exist for requirements and code, architecture documentation lacks tailored solutions, hindering the preservation of architecture knowledge and design decisions. Methods This paper presents our approach TransArC for TLR between architecture documentation and code, using component-based architecture models as intermediate artifacts to bridge the semantic gap. We create transitive trace links by combining the existing approach ArDoCo for linking architecture documentation to models with our novel approach ArCoTL for linking architecture models to code.
            Results We evaluate our approaches with five open-source projects, comparing our results to baseline approaches. The model-to-code TLR approach achieves an average F1-score of 0.98, while the documentation-to-code TLR approach achieves a promising average F1-score of 0.82, significantly outperforming baselines. Conclusion Combining two specialized approaches with an intermediate artifact shows promise for bridging the semantic gap. In future research, we will explore further possibilities for such transitive approaches.},
  doi       = {10.1145/3597503.3639130},
  file      = {Keim2024.pdf},
  url       = {https://dl.acm.org/doi/10.1145/3597503.3639130},
}

@article{Baldoni2002Fundamentals,
  author   = {Baldoni, Roberto and Klusch, Matthias},
  title    = {Fundamentals of Distributed Computing: A Practical Tour of Vector Clock Systems},
  journal  = {IEEE Distributed Systems Online},
  volume   = {3},
  number   = {2},
  year     = {2002},
  abstract = {A distributed computation consists of a set of processes that cooperate to achieve a common goal. A main characteristic of these computations is that the processes do not already share a common global memory and that they communicate only by exchanging messages over a communication network. Moreover, message transfer delays are finite yet unpredictable. This computation model defines what is known as the asynchronous distributed system model, which includes systems that span large geographic areas and are subject to unpredictable loads.},
  file     = {Baldoni2002.pdf},
  url      = {https://www.computer.org/csdl/magazine/ds/2002/02/o2001/13rRUEgs2Q8},
}

@inproceedings{Tucker2007OPIUM,
  author    = {Tucker, Chris and Shuffelton, David and Jhala, Ranjit and Lerner, Sorin},
  title     = {{OPIUM}: Optimal Package Install/Uninstall Manager},
  booktitle = {Proceedings of the 29th International Conference on Software Engineering ({ICSE} '07)},
  year      = {2007},
  abstract  = {Linux distributions often include package management tools such as apt-get in Debian or yum in RedHat. Using information about package dependencies and conflicts, such tools can determine how to install a new package (and its dependencies) on a system of already installed packages. Using off-the-shelf SAT solvers, pseudo-boolean solvers, and Integer Linear Programming solvers, we have developed a new package-management tool, called Opium, that improves on current tools in two ways: (1) Opium is complete, in that if there is a solution, Opium is guaranteed to find it, and (2) Opium can optimize a user-provided objective function, which could for example state that smaller packages should be preferred over larger ones. We performed a comparative study of our tool against Debian's apt-get on 600 traces of real-world package installations. We show that Opium runs fast enough to be usable, and that its completeness and optimality guarantees provide concrete benefits to end users.},
  doi       = {10.1109/ICSE.2007.59},
  file      = {Tucker2007.pdf},
  url       = {https://ieeexplore.ieee.org/document/4222580},
}

@Comment{jabref-meta: databaseType:bibtex;}
