Browse Source

Remove hash2offset...it was too confusing to troubleshoot

Daniel Walton 2 years ago
parent
commit
dc8a5d0316
3 changed files with 49 additions and 85 deletions
  1. 0
    1
      .gitignore
  2. 46
    81
      rubikscubennnsolver/LookupTable.py
  3. 3
    3
      rubikscubennnsolver/RubiksCube666.py

+ 0
- 1
.gitignore View File

@@ -91,6 +91,5 @@ ENV/
91 91
 *.swp
92 92
 cache
93 93
 lookup*.hash
94
-lookup*.hash2offset
95 94
 lookup*.gz
96 95
 lookup*.txt

+ 46
- 81
rubikscubennnsolver/LookupTable.py View File

@@ -2068,7 +2068,6 @@ class LookupTable(object):
2068 2068
         self.sides_FB = (self.parent.sideF, self.parent.sideB)
2069 2069
         self.filename = filename
2070 2070
         self.filename_hash = filename + '.hash'
2071
-        self.filename_hash2offset = filename + '.hash2offset'
2072 2071
         self.filename_gz = filename + '.gz'
2073 2072
         self.desc = filename.replace('lookup-table-', '').replace('.txt', '')
2074 2073
         self.filename_exists = False
@@ -2080,7 +2079,7 @@ class LookupTable(object):
2080 2079
         assert self.modulo, "%s modulo is %s" % (self, self.modulo)
2081 2080
 
2082 2081
         # If the user just git cloned the repo all of the lookup tables will still be gzipped
2083
-        if not os.path.exists(self.filename_hash) or not os.path.exists(self.filename_hash2offset):
2082
+        if not os.path.exists(self.filename_hash):
2084 2083
             if not os.path.exists(self.filename):
2085 2084
                 if not os.path.exists(self.filename_gz):
2086 2085
                     url = "https://github.com/dwalton76/rubiks-cube-lookup-tables-%sx%sx%s/raw/master/%s" % (self.parent.size, self.parent.size, self.parent.size, self.filename_gz)
@@ -2098,6 +2097,7 @@ class LookupTable(object):
2098 2097
         # Find the state_width for the entries in our .hash file
2099 2098
         with open(self.filename_hash, 'r') as fh:
2100 2099
             first_line = next(fh)
2100
+            self.width = len(first_line)
2101 2101
 
2102 2102
             # Now that the files are hashed the first line may not have an entry
2103 2103
             while ':' not in first_line:
@@ -2105,20 +2105,7 @@ class LookupTable(object):
2105 2105
 
2106 2106
             for state_steps in first_line.split(','):
2107 2107
                 (state, steps) = state_steps.split(':')
2108
-
2109
-                # Sometimes I write the hash_index in as the first entry to make debugging easier
2110
-                # When I do that I list the steps as 'INDEX'
2111
-                if steps != 'INDEX':
2112
-                    self.state_width = len(state)
2113
-                    #log.info("%s: state %s, state_width %d" % (self, state, self.state_width))
2114
-                    break
2115
-
2116
-        # Find the line width for our .hash2offset file...they will all be the same width
2117
-        with open(self.filename_hash2offset, 'r') as fh:
2118
-            first_line = next(fh)
2119
-
2120
-            # add 1 for the newline
2121
-            self.width = len(first_line)
2108
+                self.state_width = len(state)
2122 2109
 
2123 2110
         self.filename_exists = True
2124 2111
         self.state_type = state_type
@@ -2128,7 +2115,6 @@ class LookupTable(object):
2128 2115
 
2129 2116
         self.cache = {}
2130 2117
         self.fh_hash = open(self.filename_hash, 'r')
2131
-        self.fh_hash2offset = open(self.filename_hash2offset, 'r')
2132 2118
 
2133 2119
     def __str__(self):
2134 2120
         return self.desc
@@ -2153,71 +2139,54 @@ class LookupTable(object):
2153 2139
         log.info("%s: sort tmp file" % self.filename)
2154 2140
         subprocess.call(('sort', '--output=/tmp/%s' % self.filename, '/tmp/%s' % self.filename))
2155 2141
 
2156
-        log.info("%s: write .hash and .hash2offset files" % self.filename)
2142
+        log.info("%s: write .hash file" % self.filename)
2157 2143
         with open('/tmp/%s' % self.filename, 'r') as fh_tmp:
2158 2144
             with open(self.filename_hash, 'w') as fh_hash:
2159
-                with open(self.filename_hash2offset, 'w') as fh_hash2offset:
2160
-                    offset = 0
2161
-                    prev_hash_index = None
2162
-                    first_hash_index = None
2163
-                    to_write = []
2145
+                prev_hash_index = None
2146
+                first_hash_index = None
2147
+                to_write = []
2164 2148
 
2165
-                    for line in fh_tmp:
2166
-                        line = line.strip()
2167
-                        (hash_index, state, step) = line.split(':')
2168
-                        hash_index = int(hash_index)
2169
-
2170
-                        if first_hash_index is None:
2171
-                            first_hash_index = hash_index
2172
-                            for x in range(first_hash_index):
2173
-                                fh_hash2offset.write("\n")
2174
-
2175
-                        # write to filename_hash
2176
-                        if prev_hash_index is not None and hash_index != prev_hash_index:
2177
-
2178
-                            to_write_string = ','.join(to_write) + '\n'
2179
-                            fh_hash.write(to_write_string)
2180
-                            #log.info("%s: prev_hash_index %s, offset %s" % (self, prev_hash_index, offset))
2181
-                            fh_hash2offset.write("%s\n" % offset)
2182
-                            offset += len(to_write_string)
2183
-                            to_write = []
2184
-
2185
-                            # write a blank line for any hash_index that did not have an entry
2186
-                            for x in range(hash_index - prev_hash_index - 1):
2187
-                                #log.info("%s: hash_index %d, prev_hash_index %d, write blank line" % (self, hash_index, prev_hash_index))
2188
-                                fh_hash2offset.write("\n")
2189
-
2190
-                        #if not to_write:
2191
-                        #    to_write.append("%s:INDEX" % hash_index)
2192
-                        to_write.append("%s:%s" % (state, step))
2193
-                        prev_hash_index = hash_index
2194
-
2195
-                    if to_write:
2196
-
2197
-                        to_write_string = ','.join(to_write) + '\n'
2198
-                        fh_hash.write(to_write_string)
2199
-                        fh_hash2offset.write("%s\n" % offset)
2200
-                        offset += len(to_write_string)
2149
+                for line in fh_tmp:
2150
+                    line = line.strip()
2151
+                    (hash_index, state, step) = line.split(':')
2152
+                    hash_index = int(hash_index)
2153
+
2154
+                    if first_hash_index is None:
2155
+                        first_hash_index = hash_index
2156
+                        for x in range(first_hash_index):
2157
+                            fh_hash.write("\n")
2158
+
2159
+                    # write to filename_hash
2160
+                    if prev_hash_index is not None and hash_index != prev_hash_index:
2161
+                        fh_hash.write(','.join(to_write) + '\n')
2201 2162
                         to_write = []
2202 2163
 
2203 2164
                         # write a blank line for any hash_index that did not have an entry
2204
-                        #for x in range(hash_index - prev_hash_index - 1):
2205
-                        #    fh_hash2offset.write("\n")
2165
+                        for x in range(hash_index - prev_hash_index - 1):
2166
+                            #log.info("%s: hash_index %d, prev_hash_index %d, write blank line" % (self, hash_index, prev_hash_index))
2167
+                            fh_hash.write("\n")
2168
+
2169
+                    to_write.append("%s:%s" % (state, step))
2170
+                    prev_hash_index = hash_index
2206 2171
 
2207
-        # Now pad the hash2offset file so that all lines are the same length
2208
-        log.info("%s: pad .hash2offset lines to be the same width" % self.filename)
2209
-        filename_pad = self.filename_hash2offset + '.pad'
2172
+                if to_write:
2173
+                    fh_hash.write(','.join(to_write) + '\n')
2174
+                    to_write = []
2175
+
2176
+        # Now pad the .hash file so that all lines are the same length
2177
+        log.info("%s: pad .hash lines to be the same width" % self.filename)
2178
+        filename_pad = self.filename_hash + '.pad'
2210 2179
         max_length = 0
2211 2180
 
2212
-        with open(self.filename_hash2offset, 'r') as fh:
2181
+        with open(self.filename_hash, 'r') as fh:
2213 2182
             for line in fh:
2214 2183
                 length = len(line.strip())
2215 2184
                 if length > max_length:
2216 2185
                     max_length = length
2217 2186
 
2218
-        log.info("%s: longest hash2offset line is %d characters" % (self.filename, max_length))
2187
+        log.info("%s: longest .hash line is %d characters" % (self.filename, max_length))
2219 2188
         with open(filename_pad, 'w') as fh_pad:
2220
-            with open(self.filename_hash2offset, 'r') as fh:
2189
+            with open(self.filename_hash, 'r') as fh:
2221 2190
                 for line in fh:
2222 2191
                     line = line.strip()
2223 2192
                     length = len(line)
@@ -2226,7 +2195,7 @@ class LookupTable(object):
2226 2195
                     if spaces_to_add:
2227 2196
                         line = line + ' ' * spaces_to_add
2228 2197
                     fh_pad.write(line + '\n')
2229
-        shutil.move(filename_pad, self.filename_hash2offset)
2198
+        shutil.move(filename_pad, self.filename_hash)
2230 2199
 
2231 2200
     def state(self):
2232 2201
         state_function = state_functions.get(self.state_type)
@@ -2261,24 +2230,15 @@ class LookupTable(object):
2261 2230
             # We use the hash_index as our line number in the file
2262 2231
             hash_index = hashxx(state_to_find) % self.modulo
2263 2232
 
2264
-            # All lines in the .hash2offset file have been padded to be the same length
2265
-            # so jump to 'hash_index' line number and read it.  This will tell us how
2266
-            # many bytes into the .hash file to look for the entry for this hash_index.
2267
-            self.fh_hash2offset.seek(hash_index * self.width)
2268
-            line = self.fh_hash2offset.readline().rstrip()
2269
-            #log.info("%s: state %s, hash_index %d, width %d, offset in .hash %s bytes" % (self, state_to_find, hash_index, self.width, line))
2233
+            # Now seek in that many bytes in the .hash file and read that line
2234
+            self.fh_hash.seek(hash_index * self.width)
2235
+            line = self.fh_hash.readline().rstrip()
2236
+            #log.info("%s: hash_index %s, state %s, line %s" % (self, hash_index, state_to_find, line))
2270 2237
 
2271 2238
             if not line:
2272 2239
                 self.cache[state_to_find] = None
2273 2240
                 return None
2274 2241
 
2275
-            hash_index_offset = int(line)
2276
-
2277
-            # Now seek in that many bytes in the .hash file and read that line
2278
-            self.fh_hash.seek(hash_index_offset)
2279
-            line = self.fh_hash.readline().rstrip()
2280
-            #log.info("%s: hash_index %s, state %s, line %s" % (self, hash_index, state_to_find, line))
2281
-
2282 2242
             for state_steps in line.split(','):
2283 2243
                 #log.info("%s: %s, state_steps %s" % (self, line, state_steps))
2284 2244
                 (state, steps) = state_steps.split(':')
@@ -2491,6 +2451,11 @@ class LookupTableIDA(LookupTable):
2491 2451
 
2492 2452
         if steps:
2493 2453
             log.info("%s: IDA, cube is already in a state that is in our lookup table" % self)
2454
+
2455
+            # The cube is now in a state where it is in the lookup table, we may need
2456
+            # to do several lookups to get to our target state though. Use
2457
+            # LookupTable's solve() to take us the rest of the way to the target state.
2458
+            LookupTable.solve(self)
2494 2459
             return
2495 2460
 
2496 2461
         # If we are here (odds are very high we will be) it means that the current

+ 3
- 3
rubikscubennnsolver/RubiksCube666.py View File

@@ -465,7 +465,7 @@ class RubiksCube666(RubiksCube):
465 465
         log.info("")
466 466
 
467 467
         # At this point the 6x6x6 centers have been reduced to 5x5x5 centers
468
-        fake_555 = RubiksCube555(solved_5x5x5)
468
+        fake_555 = RubiksCube555(solved_5x5x5, 'URFDLB')
469 469
         fake_555.lt_init()
470 470
         self.populate_fake_555_for_ULFRBD(fake_555)
471 471
         fake_555.group_centers_guts()
@@ -476,7 +476,7 @@ class RubiksCube666(RubiksCube):
476 476
         log.info("Took %d steps to solve centers" % self.get_solution_len_minus_rotates(self.solution))
477 477
 
478 478
     def pair_inside_edges_via_444(self):
479
-        fake_444 = RubiksCube444(solved_4x4x4)
479
+        fake_444 = RubiksCube444(solved_4x4x4, 'URFDLB')
480 480
         fake_444.lt_init()
481 481
 
482 482
         # The corners don't matter but it does make troubleshooting easier if they match
@@ -589,7 +589,7 @@ class RubiksCube666(RubiksCube):
589 589
         log.info("Inside edges are paired, %d steps in" % self.get_solution_len_minus_rotates(self.solution))
590 590
 
591 591
     def pair_outside_edges_via_555(self):
592
-        fake_555 = RubiksCube555(solved_5x5x5)
592
+        fake_555 = RubiksCube555(solved_5x5x5, 'URFDLB')
593 593
         fake_555.lt_init()
594 594
 
595 595
         # The corners matter for avoiding PLL