2025
Bashyal, Atit; Boroukhian, Tina; Veerachanchai, Pakin; Naransukh, Myanganbayar; Wicaksono, Hendro
Multi-agent deep reinforcement learning based demand response and energy management for heavy industries with discrete manufacturing systems Journal Article
In: Applied Energy, vol. 392, pp. 125990, 2025.
Abstract | Links | BibTeX | Tags: artificial intelligence, data science, deep learning, demand response system, energy management, green energy, machine learning, manufacturing, operation research, reinforcement learning, sustainability
@article{bashyal2025multi,
title = {Multi-agent deep reinforcement learning based demand response and energy management for heavy industries with discrete manufacturing systems},
author = {Atit Bashyal and Tina Boroukhian and Pakin Veerachanchai and Myanganbayar Naransukh and Hendro Wicaksono},
doi = {10.1016/j.apenergy.2025.125990},
year = {2025},
date = {2025-08-15},
urldate = {2025-01-01},
journal = {Applied Energy},
volume = {392},
pages = {125990},
publisher = {Elsevier},
abstract = {Energy-centric decarbonization of heavy industries, such as steel and cement, necessitates their participation in integrating Renewable Energy Sources (RES) and effective Demand Response (DR) programs. This situation has created the opportunities to research control algorithms in diverse DR scenarios. Further, the industrial sector’s unique challenges, including the diversity of operations and the need for uninterrupted production, bring unique challenges in designing and implementing control algorithms. Reinforcement learning (RL) methods are practical solutions to the unique challenges faced by the industrial sector. Nevertheless, research in RL for industrial demand response has not yet achieved the level of standardization seen in other areas of RL research, hindering broader progress. To propel the research progress, we propose a multi-agent reinforcement learning (MARL)-based energy management system designed to optimize energy consumption in energy-intensive industrial settings by leveraging dynamic pricing DR schemes. The study highlights the creation of a MARL environment and addresses these challenges by designing a general framework that allows researchers to replicate and implement MARL environments for industrial sectors. The proposed framework incorporates a Partially Observable Markov Decision Process (POMDP) to model energy consumption and production processes while introducing buffer storage constraints and a flexible reward function that balances production efficiency and cost reduction. The paper evaluates the framework through experimental validation within a steel powder manufacturing facility. The experimental results validate our framework and also demonstrate the effectiveness of the MARL-based energy management system.},
keywords = {artificial intelligence, data science, deep learning, demand response system, energy management, green energy, machine learning, manufacturing, operation research, reinforcement learning, sustainability},
pubstate = {published},
tppubtype = {article}
}
Wicaksono, Hendro; Trat, Martin; Bashyal, Atit; Boroukhian, Tina; Felder, Mine; Ahrens, Mischa; Bender, Janek; Groß, Sebastian; Steiner, Daniel; July, Christoph; et al.
Artificial-intelligence-enabled dynamic demand response system for maximizing the use of renewable electricity in production processes Journal Article
In: The International Journal of Advanced Manufacturing Technology, vol. 138, pp. 247–271, 2025.
Abstract | Links | BibTeX | Tags: artificial intelligence, data management, data science, demand response system, energy management, green energy, industry 4.0, interoperability, machine learning, manufacturing, ontologies, reinforcement learning, semantic web, sustainability
@article{wicaksono2024artificial,
title = {Artificial-intelligence-enabled dynamic demand response system for maximizing the use of renewable electricity in production processes},
author = {Hendro Wicaksono and Martin Trat and Atit Bashyal and Tina Boroukhian and Mine Felder and Mischa Ahrens and Janek Bender and Sebastian Groß and Daniel Steiner and Christoph July and others},
url = {https://link.springer.com/article/10.1007/s00170-024-13372-7},
doi = {10.1007/s00170-024-13372-7},
year = {2025},
date = {2025-05-01},
urldate = {2025-05-01},
journal = {The International Journal of Advanced Manufacturing Technology},
volume = {138},
pages = {247--271},
publisher = {Springer London},
abstract = {The transition towards renewable electricity provides opportunities for manufacturing companies to save electricity costs through participating in demand response programs. End-to-end implementation of demand response systems focusing on manufacturing power consumers is still challenging due to multiple stakeholders and subsystems that generate a heterogeneous and large amount of data. This work develops an approach utilizing artificial intelligence for a demand response system that optimizes industrial consumers’ and prosumers’ production-related electricity costs according to time-variable electricity tariffs. It also proposes a semantic middleware architecture that utilizes an ontology as the semantic integration model for handling heterogeneous data models between the system’s modules. This paper reports on developing and evaluating multiple machine learning models for power generation forecasting and load prediction, and also mixed-integer linear programming as well as reinforcement learning for production optimization considering dynamic electricity pricing represented as Green Electricity Index (GEI). The experiments show that the hybrid auto-regressive long-short-term-memory model performs best for solar and convolutional neural networks for wind power generation forecasting. Random forest, k-nearest neighbors, ridge, and gradient-boosting regression models perform best in load prediction in the considered use cases. Furthermore, this research found that the reinforcement-learning-based approach can provide generic and scalable solutions for complex and dynamic production environments. Additionally, this paper presents the validation of the developed system in the German industrial environment, involving a utility company and two small to medium-sized manufacturing companies. It shows that the developed system benefits the manufacturing company that implements fine-grained process scheduling most due to its flexible rescheduling capacities.
},
keywords = {artificial intelligence, data management, data science, demand response system, energy management, green energy, industry 4.0, interoperability, machine learning, manufacturing, ontologies, reinforcement learning, semantic web, sustainability},
pubstate = {published},
tppubtype = {article}
}
Bashyal, Atit; Alnahas, Hani; Boroukhian, Tina; Wicaksono, Hendro
Demand response based industrial energy management with focus on consumption of renewable energy: a deep reinforcement learning approach Journal Article
In: Procedia Computer Science, vol. 253, pp. 1442-1451, 2025.
Abstract | Links | BibTeX | Tags: artificial intelligence, demand response system, energy management, manufacturing, reinforcement learning
@article{bashyal2025demand,
title = {Demand response based industrial energy management with focus on consumption of renewable energy: a deep reinforcement learning approach},
author = {Atit Bashyal and Hani Alnahas and Tina Boroukhian and Hendro Wicaksono},
url = {https://www.sciencedirect.com/science/article/pii/S1877050925002145},
doi = {10.1016/j.procs.2025.01.206},
year = {2025},
date = {2025-02-25},
journal = {Procedia Computer Science},
volume = {253},
pages = {1442--1451},
abstract = {Integrating Renewable Energy Resources (RESs) into power grids requires effective Demand Response (DR) programs. Despite high DR potential in industrial sectors, adoption lags behind that of residential and commercial sectors due to diverse operations and production continuity requirements. This paper explores a reinforcement learning (RL)-based DR scheme for energy-intensive industries, promoting the consumption of distributed Renewable Energy (RE) generation. Our approach introduces modifications to the existing Markov Decision Process (MDP) framework. It proposes a flexible reward structure that provides flexibility in balancing production requirements and promotes the consumption of RE. This study addresses the gap in industrial DR literature, emphasizing tailored DR solutions for industrial settings. The key highlight of our RL-based DR solution is its ability to facilitate a price-based DR scheme while promoting the integration of RE into the smart grid.
},
keywords = {artificial intelligence, demand response system, energy management, manufacturing, reinforcement learning},
pubstate = {published},
tppubtype = {article}
}
2024
Prayitno, Kutut Aji; Wicaksono, Hendro
Investigating the Potential of Causal Reinforcement Learning in Collaborative Urban Logistics: A Systematic Literature Review Journal Article
In: Procedia CIRP, vol. 130, pp. 1070-1076, 2024.
Abstract | Links | BibTeX | Tags: causal AI, interoperability, logistics, ontologies, reinforcement learning, semantic web, sustainability
@article{prayitno2024investigating,
title = {Investigating the Potential of Causal Reinforcement Learning in Collaborative Urban Logistics: A Systematic Literature Review},
author = {Kutut Aji Prayitno and Hendro Wicaksono},
url = {https://www.sciencedirect.com/science/article/pii/S2212827124013659},
doi = {10.1016/j.procir.2024.10.208},
year = {2024},
date = {2024-11-27},
urldate = {2024-11-27},
journal = {Procedia CIRP},
volume = {130},
pages = {1070--1076},
abstract = {Efficiently managing logistics operations is crucial in elevating sustainability and tackling the challenges urbanization brings in today’s urban environment. Collaborations among the public and private sectors in urban logistics are essential to minimize environmental impacts. This study aims to create a novel conceptual framework for collaborative logistics designed explicitly for sustainable metropolitan areas. The framework aims to enable collaborative data-driven sustainability optimization in urban logistics. It comprises ontologies to facilitate interoperability among stakeholders by providing a shared understanding of the exchanged data. The framework utilizes causal artificial intelligence to enable traceability and transparency of data-driven decisions compared to conventional machine learning working based on correlations. Furthermore, the framework also employs causal reinforcement learning that enables agents to learn what actions lead to targeted outcomes and why those actions are effective. The developed framework optimizes vehicle routes and conveyance selection while considering several operational constraints such as time windows, split-load scenarios, and commodity-specific requirements. Moreover, the system integrates the distinctive features of public transport networks. The suggested strategy minimizes fuel use and overall delivery costs, promoting a more sustainable logistics environment in metropolitan areas measured using Environmental, Social, and Governance (ESG) indicators. This study contributes to the theoretical understanding of collaborative logistics. It underscores the importance of environmental stewardship and societal well-being in logistics planning and implementation by utilizing a data-driven approach.
},
keywords = {causal AI, interoperability, logistics, ontologies, reinforcement learning, semantic web, sustainability},
pubstate = {published},
tppubtype = {article}
}