Vidal's library
Title: | Multi-Agent Patrolling with Reinforcement Learning |
Author: | Hugo Santana, Geber Ramalho, Vincent Corruble, and Bohdana Ratitch |
Book Title: | Proceedings of the Third International Joint Conference on Autonomous Agents and MultiAgent Systems |
Pages: | 1122--1129 |
Publisher: | ACM |
Year: | 2004 |
Abstract: | Patrolling tasks can be encountered in a variety of real-world domains, ranging from computer network administration and surveillance to computer wargame simulations. Patrolling is a complex multi-agent task, which usually requires agents to coordinate their decision-making in order to achieve optimal performance of the group as a whole. In this paper, we show how the patrolling task can be modeled as a reinforcement learning (RL) problem, allowing continuous and automatic adaptation of the agents' strategies to their environment. We demonstrate that efficient cooperative behavior can be achieved by using RL methods, such as Q-Learning, to train individual agents. The proposed approach is fully distributed, which makes it computationally efficient. The empirical evaluation demonstrates the effectiveness of our approach, as the results obtained are substantially better than the results previously available in this domain. |
@InProceedings{santana04a,
author = {Hugo Santana and Geber Ramalho and Vincent Corruble
and Bohdana Ratitch},
title = {Multi-Agent Patrolling with Reinforcement Learning},
booktitle = {Proceedings of the Third International Joint
Conference on Autonomous Agents and MultiAgent
Systems},
pages = {1122--1129},
year = 2004,
publisher = {{ACM}},
abstract = {Patrolling tasks can be encountered in a variety of
real-world domains, ranging from computer network
administration and surveillance to computer wargame
simulations. Patrolling is a complex multi-agent task,
which usually requires agents to coordinate their
decision-making in order to achieve optimal
performance of the group as a whole. In this paper,
we show how the patrolling task can be modeled as a
reinforcement learning (RL) problem, allowing
continuous and automatic adaptation of the agents'
strategies to their environment. We demonstrate that
efficient cooperative behavior can be achieved by
using RL methods, such as Q-Learning, to train
individual agents. The proposed approach is fully
distributed, which makes it computationally
efficient. The empirical evaluation demonstrates the
effectiveness of our approach, as the results
obtained are substantially better than the results
previously available in this domain.},
keywords = {multiagent reinforcement learning},
url = {http://jmvidal.cse.sc.edu/library/santana04a.pdf},
comment = {masrg},
googleid = {n9rWT5d1qb8J:scholar.google.com/},
cluster = {13810699025048328863}
}
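The abstract describes modeling the patrolling task as a reinforcement learning problem and training individual agents with Q-Learning. As a rough, illustrative sketch only (the toy graph, the idleness-based reward, and all parameter values below are assumptions for demonstration, not the authors' formulation), a single tabular Q-Learning patroller on a small graph might look like this in Python:

import random

# Toy patrol graph: node -> neighboring nodes (illustrative, not from the paper).
GRAPH = {
    0: [1, 3],
    1: [0, 2],
    2: [1, 3],
    3: [0, 2],
}

ALPHA = 0.1    # learning rate (assumed value)
GAMMA = 0.9    # discount factor (assumed value)
EPSILON = 0.1  # exploration rate (assumed value)
STEPS = 5000

# Q-table indexed by (current node, next node).
Q = {(s, a): 0.0 for s in GRAPH for a in GRAPH[s]}

# Idleness: time elapsed since each node was last visited.
idleness = {n: 0 for n in GRAPH}

state = 0
for step in range(STEPS):
    # Epsilon-greedy choice among neighboring nodes.
    if random.random() < EPSILON:
        action = random.choice(GRAPH[state])
    else:
        action = max(GRAPH[state], key=lambda a: Q[(state, a)])

    # Reward: idleness of the node about to be visited, so the agent
    # learns to favor long-neglected nodes.
    reward = idleness[action]

    # Advance all idleness clocks, then reset the visited node.
    for n in idleness:
        idleness[n] += 1
    idleness[action] = 0

    # Standard Q-learning update.
    best_next = max(Q[(action, a)] for a in GRAPH[action])
    Q[(state, action)] += ALPHA * (reward + GAMMA * best_next - Q[(state, action)])

    state = action

print({k: round(v, 2) for k, v in Q.items()})

In this sketch the reward for moving to a node is that node's idleness (time since its last visit); the paper's actual state, action, and reward definitions, and its multi-agent training setup, are given in the text itself.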
Last modified: Wed Mar 9 10:16:15 EST 2011