/*
  CLAW - a C++ Library Absolutely Wonderful

  CLAW is a free library without any particular aim but being useful to
  anyone.

  Copyright (C) 2005 Sébastien Angibaud
  Copyright (C) 2005-2011 Julien Jorge

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

  contact: julien.jorge@gamned.org
*/
/**
 * \brief Implementation of the artificial intelligence functions.
 * \author Julien Jorge & Sébastien Angibaud
 */
#include <claw/max_vector.hpp>

#include <algorithm>
#include <cstdlib>
#include <list>

//*************************** game_state **************************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Destructor.
 */
template<typename Action, typename Numeric>
claw::ai::game::game_state<Action, Numeric>::~game_state()
{
  // nothing to do
} // game_state::~game_state()

/*---------------------------------------------------------------------------*/
/**
 * \brief Get the minimal score a state can get.
 */
template <typename Action, typename Numeric>
typename claw::ai::game::game_state<Action, Numeric>::score
claw::ai::game::game_state<Action, Numeric>::min_score()
{
  return s_min_score;
} // game_state::min_score()

/*---------------------------------------------------------------------------*/
/**
 * \brief Get the maximal score a state can get.
 */
template <typename Action, typename Numeric>
typename claw::ai::game::game_state<Action, Numeric>::score
claw::ai::game::game_state<Action, Numeric>::max_score()
{
  return s_max_score;
} // game_state::max_score()

/*---------------------------------------------------------------------------*/
/**
 * \brief Truncate a score so it fits in the range [min_score(), max_score()].
 * \param score_val The value to fit.
 */
template<typename Action, typename Numeric>
typename claw::ai::game::game_state<Action, Numeric>::score
claw::ai::game::game_state<Action, Numeric>::fit
  ( score score_val ) const
{
  if ( s_max_score < score_val )
    return s_max_score;
  else if ( score_val < s_min_score )
    return s_min_score;
  else
    return score_val;
} // game_state::fit()
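
/*
  A small illustration of fit() (the bounds are hypothetical): with
  s_min_score == -1000 and s_max_score == 1000,

    state.fit( 1500 );  // returns  1000, clamped to s_max_score
    state.fit( -1500 ); // returns -1000, clamped to s_min_score
    state.fit( 42 );    // returns    42, already in range

  Keeping evaluations inside this interval matters because min_max and
  alpha_beta below use min_score() and max_score() as the starting
  values of their max/min folds.
*/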

//**************************** action_eval ************************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Constructor.
 * \param a The evaluated action.
 * \param e The evaluation of the action.
 */
template <typename Action, typename Numeric>
claw::ai::game::action_eval<Action, Numeric>::action_eval
  ( const Action& a, const Numeric& e )
  : action(a), eval(e)
{

} // action_eval::action_eval()

/*---------------------------------------------------------------------------*/
/**
 * \brief Compare with another action.
 * \param ae The other action.
 * \return true if this->eval < ae.eval.
 */
template <typename Action, typename Numeric>
bool claw::ai::game::action_eval<Action, Numeric>::operator<
  ( const action_eval& ae ) const
{
  return eval < ae.eval;
} // action_eval::operator<()

/*---------------------------------------------------------------------------*/
/**
 * \brief Equality of two actions.
 * \param ae The other action.
 * \return true if this->eval == ae.eval.
 */
template <typename Action, typename Numeric>
bool claw::ai::game::action_eval<Action, Numeric>::operator==
  ( const action_eval& ae ) const
{
  return eval == ae.eval;
} // action_eval::operator==()
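
/*
  Both operators compare the evaluation only, never the action itself,
  so two action_eval instances wrapping different actions but equal
  scores compare equal. A minimal sketch, assuming Action and Numeric
  are both int:

    claw::ai::game::action_eval<int, int> a( 1, 10 );
    claw::ai::game::action_eval<int, int> b( 2, 10 );
    claw::ai::game::action_eval<int, int> c( 3, 7 );

    assert( a == b ); // equal scores, different actions
    assert( c < a );  // ordered by score only

  This equivalence is what allows max_vector, used by
  select_random_action below, to collect every action tied for the best
  score.
*/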

//********************************* min_max ***********************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Apply the min-max algorithm to find the best action.
 * \param depth Depth of the search subtree we are allowed to explore.
 * \param current_state The state of the game.
 * \param computer_turn Tell if the next action is done by the computer.
 */
template<typename State>
typename claw::ai::game::min_max<State>::score
claw::ai::game::min_max<State>::operator()
  ( int depth, const state& current_state, bool computer_turn ) const
{
  score score_val;

  // we reached a final state or we are not allowed to search more.
  if ( current_state.final() || (depth == 0) )
    score_val = current_state.evaluate();
  else
    {
      std::list<action> next_actions;
      typename std::list<action>::const_iterator it;
      state* new_state;

      // get all reachable states
      current_state.next_actions( next_actions );

      if ( next_actions.empty() )
        score_val = current_state.evaluate();
      else if (computer_turn)
        {
          score_val = current_state.min_score();

          for (it = next_actions.begin(); it!=next_actions.end(); ++it)
            {
              new_state = static_cast<state*>(current_state.do_action(*it));

              // evaluate the action of the human player
              score s = (*this)( depth-1, *new_state, false );
              delete new_state;

              // and keep the best action he can do.
              if (score_val < s)
                score_val = s;
            }
        }
      else // human player's turn
        {
          score_val = current_state.max_score();

          for (it = next_actions.begin(); it!=next_actions.end(); ++it)
            {
              new_state = static_cast<state*>(current_state.do_action(*it));

              // evaluate the action of the computer player
              score s = (*this)( depth-1, *new_state, true );
              delete new_state;

              // and keep the worst action he can do
              if (s < score_val)
                score_val = s;
            }
        }
    }

  return score_val;
} // min_max::operator()
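
/*
  The State type plugged into min_max must provide the members used
  above. A minimal sketch of such a type (hypothetical, not part of the
  library), assuming integer actions and scores:

    class my_state:
      public claw::ai::game::game_state<int, int>
    {
    public:
      bool final() const;                            // game over?
      int evaluate() const;                          // heuristic score
      void next_actions( std::list<int>& l ) const;  // legal actions
      my_state* do_action( int a ) const;            // resulting state,
                                                     // heap-allocated
    };

    // score a position, exploring at most four plies:
    //   claw::ai::game::min_max<my_state> mm;
    //   int s = mm( 4, current, true );

  Note that do_action() must return a heap-allocated state: min_max
  deletes it once the recursive evaluation is done.
*/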

//******************************** alpha_beta *********************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Apply the alpha-beta algorithm to find the best action.
 * \param depth Depth of the search subtree we are allowed to explore.
 * \param current_state The state of the game.
 * \param computer_turn Tell if the next action is done by the computer.
 */
template <typename State>
typename State::score claw::ai::game::alpha_beta<State>::operator()
  ( int depth, const state& current_state, bool computer_turn ) const
{
  return compute
    ( depth, current_state, computer_turn, current_state.min_score(),
      current_state.max_score() );
} // alpha_beta::operator()

/*---------------------------------------------------------------------------*/
/**
 * \brief Find the best action using an alpha-beta algorithm.
 * \param depth Depth of the search subtree we are allowed to explore.
 * \param current_state The state of the game.
 * \param computer_turn Tell if the next action is done by the computer.
 * \param alpha Worst score of the current player.
 * \param beta Best score of the other player.
 */
template<typename State>
typename claw::ai::game::alpha_beta<State>::score
claw::ai::game::alpha_beta<State>::compute
  ( int depth, const state& current_state, bool computer_turn, score alpha,
    score beta ) const
{
  score score_val;

  // we reached a final state or we are not allowed to search more.
  if ( current_state.final() || (depth == 0) )
    score_val = current_state.evaluate();
  else
    {
      std::list<action> next_actions;
      typename std::list<action>::const_iterator it;
      state* new_state;

      // get all reachable states
      current_state.next_actions( next_actions );

      if ( next_actions.empty() )
        score_val = current_state.evaluate();
      else if (computer_turn)
        {
          score_val = current_state.min_score();
          it = next_actions.begin();

          // stop as soon as score_val reaches beta (cut-off)
          while ( it!=next_actions.end() && (score_val < beta) )
            {
              new_state = static_cast<state*>(current_state.do_action(*it));

              // evaluate the action of the human player
              score s = compute
                ( depth-1, *new_state, false, std::max(alpha, score_val), beta );
              delete new_state;

              // and keep the best action he can do.
              if (score_val < s)
                score_val = s;
              ++it;
            }
        }
      else // human player's turn
        {
          score_val = current_state.max_score();
          it = next_actions.begin();

          // stop as soon as score_val reaches alpha (cut-off)
          while ( it!=next_actions.end() && (score_val > alpha) )
            {
              new_state = static_cast<state*>(current_state.do_action(*it));

              // evaluate the action of the computer player
              score s = compute
                ( depth-1, *new_state, true, alpha, std::min(beta, score_val) );
              delete new_state;

              // and keep the worst action he can do
              if (s < score_val)
                score_val = s;
              ++it;
            }
        }
    }

  return score_val;
} // alpha_beta::compute()
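
/*
  alpha_beta computes the same value as min_max on the same tree; the
  (alpha, beta) window only discards branches that cannot change the
  result. The two cut-offs mirror each other: on the computer's turn
  the loop stops once score_val >= beta (the opponent would never allow
  this branch), on the human's turn once score_val <= alpha. A hedged
  equivalence check, reusing the hypothetical my_state sketched above:

    claw::ai::game::min_max<my_state> mm;
    claw::ai::game::alpha_beta<my_state> ab;

    assert( mm( 4, current, true ) == ab( 4, current, true ) );
*/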

//***************************** select_action *********************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Select an action using the given method.
 * \param depth Maximum depth of the search tree.
 * \param current_state The state of the game.
 * \param new_action (in/out) Best known action.
 * \param computer_turn Tell if the action is done by the computer.
 */
template<typename Method>
void claw::ai::game::select_action<Method>::operator()
  ( int depth, const state& current_state, action& new_action,
    bool computer_turn ) const
{
  std::list<action> l;
  typename std::list<action>::iterator it;
  score best_eval;
  Method method;

  // get all reachable states
  current_state.next_actions( l );
  best_eval = current_state.min_score();

  for (it=l.begin(); it!=l.end(); ++it)
    {
      state* new_state;
      score eval;

      // try and evaluate each action
      new_state = static_cast<state*>(current_state.do_action(*it));
      eval = method(depth-1, *new_state, !computer_turn);
      delete new_state;

      // we keep one of the best actions
      if (eval > best_eval)
        {
          best_eval = eval;
          new_action = *it;
        }
    }
} // select_action::operator()
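
/*
  Usage sketch (hypothetical names carried over from the my_state
  example above): selecting the computer's move with an alpha-beta
  evaluation, three plies deep.

    claw::ai::game::select_action< claw::ai::game::alpha_beta<my_state> >
      select;
    int best_move = -1; // left unchanged if no action scores above
                        // min_score()
    select( 3, current, best_move, true );
*/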

//*************************** select_random_action ****************************

/*---------------------------------------------------------------------------*/
/**
 * \brief Select a random action among the best ones.
 * \param depth Maximum depth of the search tree.
 * \param current_state The state of the game.
 * \param new_action (in/out) Best known action.
 * \param computer_turn Tell if the action is done by the computer.
 */
template<typename Method>
void claw::ai::game::select_random_action<Method>::operator()
  ( int depth, const state& current_state, action& new_action,
    bool computer_turn ) const
{
  std::list<action> l;
  typename std::list<action>::iterator it;
  action_eval<action, score> eval( new_action, current_state.min_score() );
  Method method;
  max_vector< action_eval<action, score> > events( eval );

  // get all reachable states
  current_state.next_actions( l );

  for (it=l.begin(); it!=l.end(); ++it)
    {
      state* new_state;

      // try and evaluate each action
      new_state = static_cast<state*>(current_state.do_action(*it));
      eval.action = *it;
      eval.eval = method(depth-1, *new_state, !computer_turn);
      delete new_state;

      // keep the best actions.
      events.add( eval );
    }

  // pick uniformly among the actions tied for the best evaluation;
  // RAND_MAX + 1.0 (floating point) avoids the signed integer overflow
  // of RAND_MAX + 1 when RAND_MAX == INT_MAX.
  std::size_t i = (double)rand() / (RAND_MAX + 1.0) * events.get_v().size();
  new_action = events.get_v()[i].action;
} // select_random_action::operator()
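
/*
  Unlike select_action, which always returns the first action reaching
  the best score, select_random_action draws uniformly among all
  actions tied for that score. Since it relies on std::rand(), seed the
  generator once at program start. A hedged usage sketch, reusing the
  hypothetical my_state:

    #include <cstdlib>
    #include <ctime>

    std::srand( std::time(NULL) );

    claw::ai::game::select_random_action<
      claw::ai::game::alpha_beta<my_state> > select;
    int best_move = -1;
    select( 3, current, best_move, true );
*/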