3 Copyright (c) Scott Gasch
11 The main transposition table code. The table is a large range of
12 memory organized into lines (64 bytes). Each line contains four
13 entries that are 16 bytes each (index 0..3).
15 After a search, data about the position's score and the depth we
16 searched to determine this score can be stored in the hash table.
17 When information is stored a hash replacement scheme is used to
18 overwrite one of the four entries on a line.
20 Later, before embarking on a deep recursive search, we can check
21 the hash table to see if information relevant to this position
22 has been stored already (and can be reused). If so, we can avoid
23 repeating the same search and save time.
25 A hash entry is 16 bytes long and looks like this:
27 typedef struct _HASH_ENTRY
31 UCHAR bvFlags; // 5 ==> d i r t | thr up low exact
32 signed short iValue; // 6 7
33 UINT64 u64Sig; // 8 9 A B C D E F == 16 bytes
36 The mv is the best move in a position. Even if we do not have a
37 deep enough uDepth recorded to cut off, mv is searched first.
38 uDepth is the depth searched to determine iValue. iValue is
39 either the exact score or a bound score -- depending on the
40 value of bvFlag's low order 4 bits. The high order 4 bits
41 in bvFlags holds "dirty" information to aid the hash replacement
42 scheme. Finally the u64Sig value is used to absolutely identify
43 the position to which the hash entry's information refers.
45 When screwing around with the hash code, turn on PERF_COUNTERS and
46 watch the hash hit rate (and usable hash hit rate). Also here is
47 Fine position #70 which is a great sanity check for hash code:
49 8/k7/3p4/p2P1p2/P2P1P2/8/8/K7 w - - 0 0 bm 1. Ka1-b1 Ka7-b7 2. Kb1-c1
57 $Id: hash.c 345 2007-12-02 22:56:42Z scott $
64 // Hash entries are 16 bytes each and hash lines are 4 entries = 64
65 // bytes. This is done on purpose so that a hash line fits in a cpu
66 // cache line. My Athlons have 64 byte cache lines. To adjust this
67 // for other processors, change the constant below.
//
// Convert a search depth into the smaller range stored in the one-byte
// HASH_ENTRY.uDepth field.  NOTE(review): this macro expands to multiple
// statements and (per the asserts) mutates/validates its argument in
// place, so it must be invoked as a standalone statement, never inside
// an expression or an unbraced if -- confirm against the full macro
// definition (the conversion line itself is not visible here).
//
69 #define CONVERT_SEARCH_DEPTH_TO_HASH_DEPTH(x) \
70 ASSERT(IS_VALID_DEPTH(x)); \
72 ASSERT(((x) & 0xffffff00) == 0);
//
// An entry is "dirty" (left over from a previous search) when the high
// nibble of its bvFlags does not match the current search's g_cDirty tag.
//
74 #define HASH_ENTRY_IS_DIRTY(x) \
75 (((x).bvFlags & 0xF0) != g_cDirty)
//
// ADJUST_MATE_IN_N: when defined, mate scores are adjusted by search ply
// on store/probe.  Explicitly disabled here; the #ifdef blocks below are
// therefore compiled out.
//
77 #undef ADJUST_MATE_IN_N
81 // On MP builds we lock ranges of the hash table in order to mitigate
82 // against two searcher threads colliding on the same entry at once.
83 // I did not have a free bit to use in the hash entry so I lock every
84 // 256th hash table entry at once.
//
// One spinlock word guards every NUM_HASH_LOCKS-th hash line; the lock
// index is derived from the line index in _SelectHashLine below.
// NUM_HASH_LOCKS must remain a power of two (asserted at use sites)
// because lock selection uses "& (NUM_HASH_LOCKS - 1)".
//
86 #define NUM_HASH_LOCKS 256
87 volatile static ULONG g_uHashLocks[NUM_HASH_LOCKS];
88 #define HASH_IS_LOCKED(x) ((g_uHashLocks[(x)]) != 0)
// Acquire/release wrappers around the spinlock array.  These are
// multi-statement macros; invoke only as standalone statements.
89 #define LOCK_HASH(x) \
90 AcquireSpinLock(&(g_uHashLocks[(x)])); \
91 ASSERT(HASH_IS_LOCKED(x));
92 #define UNLOCK_HASH(x) \
93 ASSERT(HASH_IS_LOCKED(x)); \
94 ReleaseSpinLock(&(g_uHashLocks[(x)]));
//
// No-op variants for single-threaded builds -- presumably the #else arm
// of an MP-build conditional not visible in this excerpt; verify that
// these redefinitions are properly guarded by #if/#else in the full file.
//
96 #define HASH_IS_LOCKED(x)
98 #define UNLOCK_HASH(x)
// Global hash-table state.  g_pHashTable is NULL until
// InitializeHashSystem allocates it; the two size fields are kept in
// sync with the allocation (entries * sizeof(HASH_ENTRY) == bytes).
101 ULONG g_uHashTableSizeEntries = 0;
102 ULONG g_uHashTableSizeBytes = 0;
103 HASH_ENTRY *g_pHashTable = NULL;
112 The dirty pattern is bitwise ored onto the bvFlags part of a
113 hash table entry. Since the low nibble is used to hold flags
114 like upper bound, lower bound, exact score, and threat the
115 dirty indicator is limited to operating in the upper nibble
116 of bvFlags (giving it a range of 16 values)
// Body of the routine that advances g_cDirty for a new search
// (signature not visible in this excerpt).  The asserts pin the
// invariant that the dirty tag lives entirely in the upper nibble.
128 ASSERT((g_cDirty & 0x0F) == 0);
130 ASSERT((g_cDirty & 0x0F) == 0);
// Optionally announce the new dirty tag when posting is enabled.
132 if (g_Options.fShouldPost)
134 Trace("Hash table dirty char: 0x%02x\n", g_cDirty);
// Free the hash table memory and reset the size globals.
141 CleanupHashSystem(void)
146 Cleanup the hash table by freeing the memory and zeroing some
147 variables. This is called before building an opening book in
148 order to free some virtual address space up.
160 if (NULL != g_pHashTable)
162 ASSERT(g_uHashTableSizeBytes > 0);
163 SystemFreeMemory(g_pHashTable);
// NOTE(review): g_pHashTable is not reset to NULL in the lines visible
// here; presumably that happens on an elided line -- confirm, since
// every other routine keys off (NULL == g_pHashTable).
165 g_uHashTableSizeBytes = 0;
166 g_uHashTableSizeEntries = 0;
// Allocate the hash table per g_Options and clear the lock array.
171 InitializeHashSystem(void)
176 Initialize the hash system... allocate the table and get everything
// The table size must be a power of two: KEY_TO_HASH_LINE_START masks
// with (size - NUM_HASH_ENTRIES_PER_LINE), which only works then.
// (The error-handling arm of this check is elided from this excerpt.)
191 if (!IS_A_POWER_OF_2(g_Options.uNumHashTableEntries))
195 g_uHashTableSizeEntries = g_Options.uNumHashTableEntries;
196 if (g_uHashTableSizeEntries != 0)
198 ASSERT(g_pHashTable == NULL);
199 uBytesToAlloc = (sizeof(HASH_ENTRY) * g_uHashTableSizeEntries);
// NOTE(review): no visible NULL check on the allocation result here --
// confirm SystemAllocateMemory aborts on failure rather than returning
// NULL, since later code only guards against a never-allocated table.
200 g_pHashTable = SystemAllocateMemory(uBytesToAlloc);
201 g_uHashTableSizeBytes = uBytesToAlloc;
// Start with all per-range spinlocks released.
204 memset((BYTE *)g_uHashLocks, 0, sizeof(g_uHashLocks));
// Zero every entry in the hash table (invalidates all stored data).
212 ClearHashTable (void)
217 Zero out the entire hash table. This function is called when a new
218 position is loaded into the root board.
230 if (NULL != g_pHashTable)
233 ASSERT(0 != g_uHashTableSizeBytes);
234 ASSERT(0 != g_uHashTableSizeEntries);
// A zeroed entry has u64Sig == 0 and uDepth == 0, so it never matches a
// probe and is trivially replaceable.
235 memset(g_pHashTable, 0, g_uHashTableSizeBytes);
241 // (g_uHashTableSizeEntries) => 1000 0000 ... 0000
242 // NUM_HASH_ENTRIES_PER_LINE => - 0100
243 // ------------------------------------------------
244 // 1111 1111 ... 1100 => Start of a hash line
//
// Map a 32-bit key fragment to the index of the first entry of its
// hash line.  Correct only while g_uHashTableSizeEntries is a power of
// two (asserted by callers): the subtraction then yields a mask whose
// low bits select an entry index aligned to a line boundary.
//
246 #define KEY_TO_HASH_LINE_START(x) \
247 ((x) & (g_uHashTableSizeEntries - NUM_HASH_ENTRIES_PER_LINE))
// Return a pointer to the hash line for u64Key, locking the covering
// range lock on MP builds (pair with _DeselectHashLine).
250 _SelectHashLine(UINT64 u64Key)
255 On an MP system, this routine locks part of the hash table, the
256 caller should do its thing and then unlock the locked portion of
257 the hash table by calling _DeselectHashLine(u64Key).
// Only the high 32 bits of the signature pick the line; the full 64-bit
// signature is stored in the entry to disambiguate.
269 ULONG uLow = (ULONG)(u64Key >> 32);
270 ULONG uLine = KEY_TO_HASH_LINE_START(uLow);
// uLine >> 2 converts an entry index to a line index (4 entries/line);
// the mask folds it onto the NUM_HASH_LOCKS spinlocks.  The LOCK_HASH
// call itself is presumably on an MP-only line elided from this excerpt.
272 ULONG uLock = (uLine >> 2) & (NUM_HASH_LOCKS - 1);
274 ASSERT(IS_A_POWER_OF_2(NUM_HASH_LOCKS));
276 ASSERT(g_uHashTableSizeBytes != 0);
277 ASSERT(g_uHashTableSizeEntries != 0);
278 ASSERT(g_pHashTable != NULL);
279 ASSERT(IS_A_POWER_OF_2(g_uHashTableSizeEntries));
280 ASSERT(((g_uHashTableSizeEntries - NUM_HASH_ENTRIES_PER_LINE) &
281 (NUM_HASH_ENTRIES_PER_LINE - 1)) == 0);
282 return(&(g_pHashTable[uLine]));
// Release the range lock taken by _SelectHashLine (no-op on non-MP
// builds).  Must recompute the same lock index from the same key.
287 _DeselectHashLine(UINT64 u64Key)
292 This releases the lock taken by _SelectHashLine
304 ULONG uLow = (ULONG)(u64Key >> 32);
305 ULONG uLine = KEY_TO_HASH_LINE_START(uLow);
// Mirrors the lock-index computation in _SelectHashLine; the UNLOCK_HASH
// call is presumably on an MP-only line elided from this excerpt.
306 ULONG uLock = (uLine >> 2) & (NUM_HASH_LOCKS - 1);
308 ASSERT(IS_A_POWER_OF_2(NUM_HASH_LOCKS));
// Pick which of the four entries on u64Key's hash line should receive a
// new store (depth-priority slot 0, always-replace slot 1, or their
// backup slots 2/3), returning a pointer to the chosen entry.
316 _SelectHashEntry(UINT64 u64Key,
324 This routine takes a lock. The caller should do its thing and then
325 release the lock by calling _DeselectHashEntry.
341 ASSERT(g_uHashTableSizeBytes != 0);
342 ASSERT(g_uHashTableSizeEntries != 0);
343 ASSERT(g_pHashTable != NULL);
344 ASSERT(IS_A_POWER_OF_2(g_uHashTableSizeEntries));
347 // _SelectHashLine takes a lock on MP builds.
349 pEntry = _SelectHashLine(u64Key);
350 ASSERT(pEntry != NULL);
356 // Note: Hash lines consist of four entries currently:
358 // 1. The first is a depth-priority hash entry (store here if you
359 // can beat the depth of what's already in there).
361 // 2. The second is an always-store hash entry (store here if you
362 // can't store in the depth-priority entry).
364 // 3. and 4. The last two entries are where we bump the data from
365 // the depth- priority table and always-store table when they are
366 // going to be overwritten by newer data.
370 // Replace the 0th entry on the line if:
372 // ...the new entry has greater depth
373 // ...the current entry is stale (from an old search)
375 if ((pEntry[0].uDepth <= uDepth) ||
376 HASH_ENTRY_IS_DIRTY(pEntry[0]))
379 // Do we need to back up the entry we will overwrite?
// Back up slot 0 into slot 2 only when the new store is for a different
// position or a different bound type -- otherwise it's a refinement of
// the same data and the backup would be redundant.
381 if ((pEntry[0].u64Sig != u64Key) ||
382 ((pEntry[0].bvFlags & HASH_FLAG_VALID_BOUNDS) != uType))
384 pEntry[2] = pEntry[0];
386 pStore = &(pEntry[0]);
391 // We could not replace the depth priority 0th entry; instead,
392 // replace the "always replace" 1st entry on the line unless it
393 // contains mate information and is not stale.
//
// NOTE(review): BUG -- abs() is applied to the result of the comparison,
// i.e. abs(pEntry[1].iValue >= +NMATE), which is always abs(0) or
// abs(1).  The intended test is (abs(pEntry[1].iValue) >= +NMATE), so
// as written the "preserve mate info in slot 1" protection never fires.
//
395 if (((abs(pEntry[1].iValue >= +NMATE)) &&
396 (abs(iValue) < +NMATE)) &&
397 (!HASH_ENTRY_IS_DIRTY(pEntry[1])))
// Slot 1 holds fresh mate info and the new data is non-mate: spill the
// new data into backup slot 3 instead of clobbering slot 1.
399 pStore = &(pEntry[3]);
// Otherwise back up slot 1 into slot 3 (same different-position /
// different-bound-type test as above) and store into slot 1.
402 if ((pEntry[1].u64Sig != u64Key) ||
403 ((pEntry[1].bvFlags & HASH_FLAG_VALID_BOUNDS) != uType))
405 pEntry[3] = pEntry[1];
407 pStore = &(pEntry[1]);
410 ASSERT(pStore != NULL);
// Release the lock taken (via _SelectHashLine) by _SelectHashEntry.
417 _DeselectHashEntry(UINT64 u64Key)
422 Releases the lock taken by _SelectHashEntry.
434 _DeselectHashLine(u64Key); // this releases a lock
// Record "this position is worth at most iValue" in the hash table.
// Upper bounds carry no best move (the mv parameter is commented out).
440 StoreUpperBound(//MOVE mvBest,
449 Store an upper bound in the hash table.
453 MOVE mvBest : singular move from the node, if any
468 ASSERT(IS_VALID_SCORE(iValue));
469 ASSERT(IS_VALID_DEPTH(uDepth));
472 // It does not make sense to store hash positions that are worth
473 // at best ~INFINITY. Every position is and this is a waste of
474 // hash space. Ignore these.
476 if (iValue >= +NMATE) return;
477 if (NULL == g_pHashTable) return;
// Multi-statement macro: scales uDepth in place to hash units.
479 CONVERT_SEARCH_DEPTH_TO_HASH_DEPTH(uDepth);
// Position signature = pawn sig XOR non-pawn sig.
480 u64Key = pos->u64NonPawnSig ^ pos->u64PawnSig;
483 // _SelectHashEntry takes a lock on MP builds
485 pHash = _SelectHashEntry(u64Key, uDepth, HASH_FLAG_UPPER, iValue);
486 ASSERT(pHash != NULL);
487 pHash->uDepth = (UCHAR)uDepth;
488 if (pHash->u64Sig != u64Key)
491 // Note: don't overwrite a good hash move with "none" if we
492 // are replacing a hash entry for the same position. Also
493 // if we are klobbering data about the same position save
494 // a memory write to update the (same) sig.
497 pHash->u64Sig = u64Key;
// Dirty tag lives in the high nibble; bound flags in the low nibble.
499 ASSERT((g_cDirty & HASH_FLAG_UPPER) == 0);
500 pHash->bvFlags = g_cDirty | HASH_FLAG_UPPER;
501 ASSERT((pHash->bvFlags & 0xF0) == g_cDirty);
// Threat flag is set conditionally (the guarding if-line is elided
// from this excerpt -- presumably "if (fThreat)").
504 ASSERT((pHash->bvFlags & HASH_FLAG_THREAT) == 0);
505 pHash->bvFlags |= HASH_FLAG_THREAT;
508 if (iValue <= -NMATE)
511 // If this is an upper bound near -INFINITY
512 // (i.e. ~-INFINITY at best) convert it into -NMATE at
513 // best (at best mate in N).
515 pHash->iValue = -NMATE;
520 // Otherwise hash the exact value.
522 ASSERT(iValue > -NMATE);
523 ASSERT(iValue < +NMATE);
524 pHash->iValue = (signed short)iValue;
528 // Release the lock on MP builds
530 _DeselectHashEntry(u64Key);
// Record a best move and its exact score for the current position.
536 StoreExactScore(MOVE mvBestMove,
546 Store a best move and its exact score in the hash table.
566 ASSERT(IS_VALID_SCORE(iValue));
567 ASSERT(IS_VALID_DEPTH(uDepth));
568 if (NULL == g_pHashTable) return;
// Multi-statement macro: scales uDepth in place to hash units.
570 CONVERT_SEARCH_DEPTH_TO_HASH_DEPTH(uDepth);
571 u64Key = pos->u64NonPawnSig ^ pos->u64PawnSig;
574 // _SelectHashEntry takes a lock on MP builds.
576 pHash = _SelectHashEntry(u64Key, uDepth, HASH_FLAG_EXACT, iValue);
577 ASSERT(pHash != NULL);
580 // Populate the hash entry
582 pHash->uDepth = (UCHAR)uDepth;
// Exact-score entries always carry a best move (unlike upper bounds).
583 ASSERT(mvBestMove.uMove);
584 pHash->mv.uMove = mvBestMove.uMove;
585 ASSERT((g_cDirty & HASH_FLAG_EXACT) == 0);
586 pHash->bvFlags = g_cDirty | HASH_FLAG_EXACT;
587 ASSERT((pHash->bvFlags & 0xF0) == g_cDirty);
// Threat flag set conditionally (guarding if-line elided from excerpt).
590 ASSERT((pHash->bvFlags & HASH_FLAG_THREAT) == 0);
591 pHash->bvFlags |= HASH_FLAG_THREAT;
593 pHash->u64Sig = u64Key;
// Mate scores are ply-dependent; since ADJUST_MATE_IN_N is #undef'd,
// they are demoted to bounds at +/-NMATE instead of stored exactly.
595 if (iValue >= +NMATE)
597 #ifdef ADJUST_MATE_IN_N
598 ASSERT((iValue + (int)uPly) < +INFINITY);
599 ASSERT((iValue + (int)uPly) > -INFINITY);
600 pHash->iValue = (signed short)(iValue + (int)uPly);
// Compiled path: "we mate them" becomes a lower bound of +NMATE.
602 pHash->bvFlags = g_cDirty | HASH_FLAG_LOWER;
603 ASSERT((pHash->bvFlags & 0xF0) == g_cDirty);
604 pHash->iValue = +NMATE;
607 else if (iValue <= -NMATE)
609 #ifdef ADJUST_MATE_IN_N
610 ASSERT((iValue + (int)uPly) < +INFINITY);
611 ASSERT((iValue - (int)uPly) > -INFINITY);
612 pHash->iValue = (signed short)(iValue - (int)uPly);
// Compiled path: "they mate us" becomes an upper bound.
614 pHash->bvFlags = g_cDirty | HASH_FLAG_UPPER;
615 ASSERT((pHash->bvFlags & 0xF0) == g_cDirty);
// NOTE(review): suspicious -- this branch handles iValue <= -NMATE and
// stores an UPPER bound, so the stored value should presumably be
// -NMATE ("at best mated in N"), not +NMATE.  As written a losing mate
// is hashed as "worth at most +NMATE", which is vacuous and loses the
// mate information.  Verify against HashLookup's HASH_FLAG_UPPER path.
616 pHash->iValue = +NMATE;
// Non-mate scores are stored exactly.
621 ASSERT(iValue > -NMATE);
622 ASSERT(iValue < +NMATE);
623 pHash->iValue = (short signed)iValue;
627 // Release the lock on MP builds
629 _DeselectHashEntry(u64Key); // Release the lock
// Record "this position is worth at least iValue" plus the refuting
// move (if any) in the hash table.
635 StoreLowerBound(MOVE mvBestMove,
644 Store a lower bound score and move in the hash table.
663 ASSERT(IS_VALID_SCORE(iValue));
664 ASSERT(IS_VALID_DEPTH(uDepth));
667 // Do not bother to hash this node if it is telling us that the
668 // value of this position is worth at least ~-INFINITY. This is
669 // useless information and a waste of hash space.
671 if (iValue <= -NMATE) return;
672 if (NULL == g_pHashTable) return;
// Multi-statement macro: scales uDepth in place to hash units.
674 CONVERT_SEARCH_DEPTH_TO_HASH_DEPTH(uDepth);
675 u64Key = pos->u64NonPawnSig ^ pos->u64PawnSig;
678 // _SelectHashEntry takes a lock on MP builds
680 pHash = _SelectHashEntry(u64Key, uDepth, HASH_FLAG_LOWER, iValue);
681 ASSERT(pHash != NULL);
684 // Populate the entry
686 pHash->uDepth = (UCHAR)uDepth;
687 if ((mvBestMove.uMove) || (pHash->u64Sig != u64Key))
690 // Note: only overwrite the existing hash move if either we
691 // have a move (i.e. not null move) or the entry we are
692 // overwriting is for a different position.
694 pHash->mv.uMove = mvBestMove.uMove;
696 pHash->u64Sig = u64Key;
697 ASSERT((g_cDirty & HASH_FLAG_LOWER) == 0);
698 pHash->bvFlags = g_cDirty | HASH_FLAG_LOWER;
699 ASSERT((pHash->bvFlags & 0xF0) == g_cDirty);
// Branchless threat flag: multiplies by 1 when fThreat == TRUE, else 0.
700 pHash->bvFlags |= (HASH_FLAG_THREAT * (fThreat == TRUE));
// Non-mate lower bounds are stored exactly... (the guarding condition
// separating this from the mate case below is elided from this excerpt)
703 ASSERT(iValue < +NMATE);
704 ASSERT(iValue > -NMATE);
705 pHash->iValue = (signed short)iValue;
710 // If the value is greater than mate in N don't store how many
711 // moves N is because this depends on where in the search we
712 // have found this position. Just say that this position is
713 // worth at least mate in N.
715 pHash->iValue = +NMATE;
719 // Release the lock on MP builds
721 _DeselectHashEntry(u64Key);
// Scan the hash line for pos and return a stored move suitable for
// pondering (a default/null mv is returned on miss).
727 GetPonderMove(POSITION *pos)
732 Get a move to ponder.
746 UINT64 u64Key = pos->u64NonPawnSig ^ pos->u64PawnSig;
750 if (NULL == g_pHashTable) return(mv);
753 // _SelectHashLine takes a lock on MP builds
755 pHash = _SelectHashLine(u64Key);
// Walk all four entries on the line looking for a signature match
// (loop header partially elided from this excerpt).
757 x < NUM_HASH_ENTRIES_PER_LINE;
760 if (u64Key == pHash->u64Sig)
// Only accept the move from an entry whose bound type qualifies
// (comparison's right-hand side elided from this excerpt).
763 if ((pHash->bvFlags & HASH_FLAG_VALID_BOUNDS) ==
// Drop the line lock before returning.
776 _DeselectHashLine(u64Key);
// Probe the hash table for the current position: return (via out
// params) the best hash move, a possible score cutoff, a threat flag
// and a hint to skip the nullmove search.
783 HashLookup(SEARCHER_THREAD_CONTEXT *ctx,
796 Lookup any information we have about the current position in the
797 hash table. Cutoff if we can.
801 SEARCHER_THREAD_CONTEXT *ctx,
819 POSITION *pos = &(ctx->sPosition);
820 UINT64 u64Key = pos->u64NonPawnSig ^ pos->u64PawnSig;
// Best hash-move score seen so far on this line (0 == none yet).
821 ULONG uMoveScore = 0;
// Rank candidate hash moves: deeper entries and bigger captures win;
// +1 keeps the score nonzero so "no move yet" (0) always loses.
824 #define COMPUTE_MOVE_SCORE(mv, depth) \
825 ((depth) * 8 + PIECE_VALUE_OVER_100((mv).pCaptured) + 1)
827 ASSERT(IS_VALID_DEPTH(uDepth));
828 ASSERT(IS_VALID_DEPTH(uNextDepth));
829 ASSERT(uNextDepth <= uDepth);
830 ASSERT(IS_VALID_SCORE(iAlpha));
831 ASSERT(IS_VALID_SCORE(iBeta));
832 ASSERT(iAlpha < iBeta);
833 ASSERT(pHashMove->uMove == 0);
834 ASSERT(NULL != g_pHashTable);
835 INC(ctx->sCounters.hash.u64Probes);
838 // These are out parameters for the caller, initialize them
839 // saying we don't know anything.
841 *pfAvoidNull = FALSE;
// Multi-statement macros: scale both depths in place to hash units.
844 CONVERT_SEARCH_DEPTH_TO_HASH_DEPTH(uDepth);
845 CONVERT_SEARCH_DEPTH_TO_HASH_DEPTH(uNextDepth);
848 // _SelectHashLine takes a lock on MP builds.
850 pHash = _SelectHashLine(u64Key);
// Walk all four entries on the line (loop header partially elided
// from this excerpt).
852 x < NUM_HASH_ENTRIES_PER_LINE;
855 if (u64Key == pHash->u64Sig)
858 // This entry hit, make it "clean" so it is harder to replace
// Strip the old dirty nibble, then stamp in the current tag.
861 pHash->bvFlags &= 0x0F;
862 ASSERT((g_cDirty & 0x0F) == 0);
863 pHash->bvFlags |= g_cDirty;
864 ASSERT((pHash->bvFlags & 0xF0) == g_cDirty);
867 // Parse the matching entry
// Sticky-OR: any matching entry with the threat bit sets *pfThreat.
869 *pfThreat |= ((pHash->bvFlags & HASH_FLAG_THREAT) > 0);
872 // If we have an exact match and sufficient depth to
873 // satisfy the depth to which we will later search after a
874 // nullmove, see if the known value of this node is less
875 // than beta. If so, doing nothing in this position will
876 // probably not be better than doing something therefore
877 // its not wise to perform the nullmove search. It's just
878 // a waste of nodes and time.
// (The "*pfAvoidNull = TRUE" body of this test is elided from
// this excerpt.)
880 if ((pHash->uDepth >= uNextDepth) &&
881 ((pHash->bvFlags & HASH_FLAG_UPPER) ||
882 (pHash->bvFlags & HASH_FLAG_EXACT)) &&
883 (pHash->iValue < iBeta))
888 // Because we can encounter several moves while looking for
889 // a hash cutoff, have a semantic about which one we return
891 INC(ctx->sCounters.hash.u64OverallHits);
// Keep the highest-scoring hash move found on this line.
892 if (pHash->mv.uMove) {
893 uThisScore = COMPUTE_MOVE_SCORE(pHash->mv, uDepth);
894 ASSERT(uThisScore != 0);
895 if (uThisScore > uMoveScore) {
896 uMoveScore = uThisScore;
897 *pHashMove = pHash->mv;
898 ASSERT(SanityCheckMove(pos, pHash->mv));
903 // We have a hit, but is it useful?
// A cutoff requires at least as much depth as the current search.
905 if (pHash->uDepth >= uDepth)
907 switch (pHash->bvFlags & HASH_FLAG_VALID_BOUNDS)
909 case HASH_FLAG_EXACT:
910 *piScore = pHash->iValue;
911 INC(ctx->sCounters.hash.u64UsefulHits);
912 INC(ctx->sCounters.hash.u64ExactScoreHits);
913 #ifdef ADJUST_MATE_IN_N
915 // Readjust mate in N scores.
917 if (pHash->iValue >= +NMATE)
919 *piScore -= (int)uPly;
921 else if (pHash->iValue <= -NMATE)
923 *piScore += (int)uPly;
926 ASSERT(IS_VALID_SCORE(*piScore));
// Lower bound: cuts off only when it already beats beta.
930 case HASH_FLAG_LOWER:
931 if (pHash->iValue >= iBeta)
933 INC(ctx->sCounters.hash.u64UsefulHits);
934 INC(ctx->sCounters.hash.u64LowerBoundHits);
935 *piScore = pHash->iValue;
936 ASSERT(IS_VALID_SCORE(*piScore));
// Upper bound: cuts off only when it fails below alpha.
941 case HASH_FLAG_UPPER:
942 if (pHash->iValue <= iAlpha)
944 INC(ctx->sCounters.hash.u64UsefulHits);
945 INC(ctx->sCounters.hash.u64UpperBoundHits);
946 *piScore = pHash->iValue;
947 ASSERT(IS_VALID_SCORE(*piScore));
// Drop the line lock before returning.
968 _DeselectHashLine(u64Key);