3 Copyright (c) Scott Gasch
11 Dynamic move ordering functions/structures. By "dynamic move
12 ordering" I mean killer moves and history heuristic stuff.
14 Note 1: there are two history tables here. One is used for move
15 ordering and the other is used for pruning decisions. Both only
16 contain data about non-capture/promote moves. The former is
17 updated with roughly remaining_depth^2 at a fail high while the
18 latter is updated so as to maintain an approximate answer to "what
19 percent of the time does this move fail high."
21 Note 2: the globals in this module may be accessed by more than
22 one searcher thread at the same time; spinlocks are used to
coordinate access between threads.
25 Note 3: All of these tables must be cleared when a new game is
26 started or a new position is loaded onto the board.
34 $Id: dynamic.c 345 2007-12-02 22:56:42Z scott $
// Global history table used for move ordering, indexed elsewhere in this
// file as [piece moved][destination square].  14 piece codes x 128 squares
// (128 suggests an 0x88-style board -- TODO confirm against board code).
40 ULONG g_HistoryCounters[14][128];
41 #define FH_STATS_TABLE_SIZE (0x20000)
// Per-move fail-high statistics entry.  NOTE(review): the struct body is
// elided from this view; usage below shows it is a union of a 32-bit
// uWholeThing overlaying two 16-bit halves, u16FailHighs and u16Attempts.
43 typedef struct _FH_STATS
// Fail-high statistics table used for pruning decisions, keyed by
// MOVE_TO_INDEX(mv).
56 FH_STATS g_FailHighs[FH_STATS_TABLE_SIZE];
// Spinlock word guarding the global tables above when multiple searcher
// threads are active (see Note 2 in the file header).  Nonzero == held.
59 volatile static ULONG g_uDynamicLock;
60 #define DYN_IS_LOCKED (g_uDynamicLock != 0)
// Lock/unlock macro bodies (the #define heads and surrounding #if are
// elided from this view).
62 AcquireSpinLock(&g_uDynamicLock); \
65 ASSERT(DYN_IS_LOCKED); \
66 ReleaseSpinLock(&g_uDynamicLock)
// NOTE(review): this second definition appears to be the single-threaded
// (#else) build branch, where "is locked" is trivially true so the ASSERTs
// in the helpers below still hold -- the #if/#else lines are not visible.
68 #define DYN_IS_LOCKED (1)
// One-time initialization of the dynamic move ordering structures.  As
// visible here it simply delegates to ClearDynamicMoveOrdering; the return
// type and any status handling are elided from this view.
75 InitializeDynamicMoveOrdering(void)
80 Initialize dynamic move ordering structures.
95 ClearDynamicMoveOrdering();
// Reset the global history and fail-high tables.  Per the file header this
// must run when a new game starts or a new position is loaded.
100 ClearDynamicMoveOrdering(void)
105 Clear the global history table. Killer moves are per-context
106 structures and must be cleared on a per-context basis.
120 memset(g_HistoryCounters, 0, sizeof(g_HistoryCounters));
121 for (u = 0; u < FH_STATS_TABLE_SIZE; u++)
// Seed each entry to 1 attempt / 1 fail high (both 16-bit halves == 1)
// rather than zero -- presumably so the attempts denominator used by
// GetMoveFailHighPercentage never starts at zero; the division itself is
// not visible in this view, so confirm.
123 g_FailHighs[u].uWholeThing = 0x00010001;
// Record that move mv failed high: bump both its attempt count and its
// fail-high count in g_FailHighs.
129 _RecordMoveFailHigh(MOVE mv)
134 Update the "fail high percentage" history table for this move.
135 The table is used to make pruning decisions, not to rank moves.
147 ULONG u = MOVE_TO_INDEX(mv);
// Snapshot the whole 32-bit union so both 16-bit halves can be tested
// with one read.
148 ULONG v = g_FailHighs[u].uWholeThing;
// Caller is expected to hold the dynamic-tables lock; this helper does
// not acquire it itself.
150 ASSERT(DYN_IS_LOCKED);
// If either 16-bit half is saturated at 0xFFFF, halve BOTH counters:
// this preserves the approximate fail-high ratio while making room for
// the increments below (and prevents 16-bit wraparound).
151 if (((v & 0x0000FFFF) == 0x0000FFFF) || ((v & 0xFFFF0000) == 0xFFFF0000))
153 g_FailHighs[u].u16FailHighs >>= 1;
154 g_FailHighs[u].u16Attempts >>= 1;
// One more attempt, and it was a fail high.
156 g_FailHighs[u].u16FailHighs++;
157 g_FailHighs[u].u16Attempts++;
// Post-conditions: no counter wrapped, and fail highs never exceed
// attempts (the table answers "what fraction of attempts fail high").
158 ASSERT(g_FailHighs[u].u16FailHighs != 0);
159 ASSERT(g_FailHighs[u].u16Attempts != 0);
160 ASSERT(g_FailHighs[u].u16Attempts >= g_FailHighs[u].u16FailHighs);
// Record that move mv was tried but did NOT fail high: bump only its
// attempt count, leaving the fail-high count alone (which lowers the
// move's observed fail-high percentage).
165 _RecordMoveFailure(MOVE mv)
170 Update the fail high percentage table with the information that a
171 move has not produced a fail high cutoff when it was considered.
183 ULONG u = MOVE_TO_INDEX(mv);
// Caller must hold the dynamic-tables lock.
185 ASSERT(DYN_IS_LOCKED);
// Only the attempts half can overflow here; halve both halves to keep
// the ratio intact before incrementing (same aging scheme as
// _RecordMoveFailHigh).
186 if (g_FailHighs[u].u16Attempts == 0xFFFF)
188 g_FailHighs[u].u16FailHighs >>= 1;
189 g_FailHighs[u].u16Attempts >>= 1;
191 g_FailHighs[u].u16Attempts++;
// Post-conditions mirror _RecordMoveFailHigh.
192 ASSERT(g_FailHighs[u].u16Attempts != 0);
193 ASSERT(g_FailHighs[u].u16Attempts >= g_FailHighs[u].u16FailHighs);
// Install mv as the newest killer move at the current ply in this
// searcher context.  Killers are per-context (not global), two slots per
// ply, with slot [0] the most recent.
198 _NewKillerMove(SEARCHER_THREAD_CONTEXT *ctx,
205 Add a new killer move at a ply.
209 SEARCHER_THREAD_CONTEXT *ctx,
219 ULONG uPly = ctx->uPly;
222 ASSERT(uPly < MAX_PLY_PER_SEARCH);
// Captures/promotions are ordered by other means and never stored as
// killers (see Note 1 in the file header).
223 ASSERT(!IS_CAPTURE_OR_PROMOTION(mv));
// Moves that escape check go into their own two-slot killer list, kept
// separate from the normal killers.
226 if (mv.bvFlags & MOVE_FLAG_ESCAPING_CHECK)
// Only shift the slots when mv is not already in slot [0], so the two
// slots never hold the same move.
228 if (!IS_SAME_MOVE(mv, ctx->mvKillerEscapes[uPly][0]))
230 ctx->mvKillerEscapes[uPly][1] = ctx->mvKillerEscapes[uPly][0];
231 ctx->mvKillerEscapes[uPly][0] = mv;
233 ASSERT(!IS_SAME_MOVE(ctx->mvKillerEscapes[uPly][0],
234 ctx->mvKillerEscapes[uPly][1]));
// Tag killers whose score indicates mate; branchless: the comparison
// yields 0 or 1, multiplied by the flag bit.
238 mv.bvFlags |= ((iScore >= +NMATE) * MOVE_FLAG_KILLERMATE);
239 if (!IS_SAME_MOVE(mv, ctx->mvKiller[uPly][0]))
241 ctx->mvKiller[uPly][1] = ctx->mvKiller[uPly][0];
242 ctx->mvKiller[uPly][0] = mv;
// If slot [1] ended up empty (uMove == 0), backfill it with the move
// that refuted a null move at this ply -- a cheap second killer guess.
243 if (ctx->mvKiller[uPly][1].uMove == 0)
245 ctx->mvKiller[uPly][1] = ctx->mvNullmoveRefutations[uPly];
248 ASSERT(!IS_SAME_MOVE(ctx->mvKiller[uPly][0], ctx->mvKiller[uPly][1]));
// Reward a fail-high move: raise its counter in the global history table
// (used for move ordering) and feed the fail-high stats table (used for
// pruning).
254 _IncrementMoveHistoryCounter(MOVE mv,
260 Increase a move's history counter in the global history table.
261 Also affect the history pruning counters in prune.c.
// History only tracks quiet moves.
278 ASSERT(!IS_CAPTURE_OR_PROMOTION(mv));
// Convert fractional-ply depth units into whole plies.
279 uVal = uDepth / ONE_PLY;
281 ASSERT(uVal <= MAX_PLY_PER_SEARCH);
// Counter keyed by (piece moved, destination square).
286 pu = &(g_HistoryCounters[mv.pMoved][mv.cTo]);
// NOTE(review): the actual increment of *pu is elided from this view;
// per the file header it is roughly remaining_depth^2 -- confirm.
291 // Make sure that the history weight doesn't get large enough to
292 // affect the move flags or else our move ordering algorithm is
// NOTE(review): the guard tests only the counter just bumped, but when
// it trips the WHOLE table is aged (>>= 4), preserving relative order.
295 while (*pu & ~STRIP_OFF_FLAGS)
297 for (x = 0; x <= WHITE_KING; x++)
301 g_HistoryCounters[x][y] >>= 4;
308 // The purpose of this restriction is to ease contention for the
309 // memory bandwidth of the machine on dual-core / dual proc
310 // systems. The net result is positive.
// Only record fail-high stats for reasonably deep nodes on one build
// flavor...
312 if (uDepth > THREE_PLY)
314 _RecordMoveFailHigh(mv);
// ...while this second, unconditional call is presumably the other
// branch of an elided #if/#else (or if/else) -- confirm against the
// full source; both paths reach _RecordMoveFailHigh.
317 _RecordMoveFailHigh(mv);
// Penalize a quiet move that was searched before the best move but did
// not fail high: lower its history counter and record the failure in the
// fail-high stats table.
323 _DecrementMoveHistoryCounter(MOVE mv,
329 Decrease a move's history counter in the global history table.
345 ASSERT(!IS_CAPTURE_OR_PROMOTION(mv));
// Whole plies from fractional-ply depth units, mirroring
// _IncrementMoveHistoryCounter.
346 uVal = uDepth / ONE_PLY;
348 ASSERT(uVal <= MAX_PLY_PER_SEARCH);
353 pu = &(g_HistoryCounters[mv.pMoved][mv.cTo]);
// NOTE(review): the actual decrement/clamp of *pu is elided from this
// view -- confirm it cannot underflow below zero.
363 _RecordMoveFailure(mv);
// Public entry point called when a move beats alpha (root or Search, not
// QSearch): reward the best move and penalize the quiet moves searched
// before it at this node.
369 UpdateDynamicMoveOrdering(IN SEARCHER_THREAD_CONTEXT *ctx,
370 IN ULONG uRemainingDepth,
378 Update dynamic move ordering structs for a particular move (and,
379 possibly, the other moves that were considered prior to this move
380 in the move ordering). This is called when a move beats alpha at
381 the root or in Search (but not in QSearch).
385 SEARCHER_THREAD_CONTEXT *ctx,
386 ULONG uRemainingDepth,
401 // Add this move to the killer list and increment its history count
// Only quiet moves participate in killers/history.
403 if (!IS_CAPTURE_OR_PROMOTION(mvBest))
405 _NewKillerMove(ctx, mvBest, iScore);
406 _IncrementMoveHistoryCounter(mvBest, uRemainingDepth);
410 // If this move was not the first we considered at this node,
411 // decrement the history counters of moves we considered before
// Step uCurrent back one (branchless, clamped at 0) -- it appears to
// arrive pointing one past the best move on the move stack, so after
// this it indexes mvBest itself (see the ASSERT below); confirm against
// the caller.
414 uCurrent -= (uCurrent != 0);
// Walk the moves considered at this ply before mvBest (loop bound and
// increment lines elided from this view).
415 for (u = ctx->sMoveStack.uBegin[ctx->uPly];
419 ASSERT(IS_SAME_MOVE(ctx->sMoveStack.mvf[uCurrent].mv, mvBest));
420 mv = ctx->sMoveStack.mvf[u].mv;
421 ASSERT(!IS_SAME_MOVE(mv, mvBest));
// Penalize only the quiet also-rans; captures/promotions are ordered
// by other means.
422 if (!IS_CAPTURE_OR_PROMOTION(mv))
424 _DecrementMoveHistoryCounter(mv, uRemainingDepth);
// Query the fail-high stats table for mv.  Reads the fail-high count (n)
// and attempt count (d) for the move's hash slot; the percentage
// computation and return statement are elided from this view --
// presumably some scaling of n/d, with d guaranteed nonzero by the
// 0x00010001 seeding in ClearDynamicMoveOrdering (confirm).
433 GetMoveFailHighPercentage(IN MOVE mv)
438 Lookup a move in the fail high percentage history table and return
439 its approximate fail high percentage.
451 ULONG u = MOVE_TO_INDEX(mv);
454 n = g_FailHighs[u].u16FailHighs;
455 d = g_FailHighs[u].u16Attempts;
// Shutdown counterpart to InitializeDynamicMoveOrdering.  All tables here
// are statically allocated, so (per its own comment) there is nothing to
// release; kept for symmetry with the module's init path.
466 CleanupDynamicMoveOrdering(void)
471 Cleanup dynamic move ordering structs -- basically a noop for now.
// Periodic maintenance: age every history counter by halving it, so stale
// move-ordering information decays over time.  Gentler than the >>= 4
// emergency aging in _IncrementMoveHistoryCounter.
488 MaintainDynamicMoveOrdering(void)
493 Perform routine maintenance on dynamic move ordering data by
494 reducing the magnitude of the history counters.
507 for (x = 0; x <= WHITE_KING; x++)
// Inner loop over destination squares (its header line is elided from
// this view).
511 g_HistoryCounters[x][y] >>= 1;