1 | /* |
2 | * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, |
3 | * and the EPL 1.0 (http://h2database.com/html/license.html). |
4 | * Initial Developer: H2 Group |
5 | */ |
6 | package org.h2.mvstore; |
7 | |
8 | import java.io.IOException; |
9 | import java.io.PrintWriter; |
10 | import java.io.Writer; |
11 | import java.nio.ByteBuffer; |
12 | import java.nio.channels.FileChannel; |
13 | import java.sql.Timestamp; |
14 | import java.util.Map; |
15 | import java.util.Map.Entry; |
16 | import java.util.TreeMap; |
17 | |
18 | import org.h2.engine.Constants; |
19 | import org.h2.message.DbException; |
20 | import org.h2.mvstore.type.DataType; |
21 | import org.h2.mvstore.type.StringDataType; |
22 | import org.h2.store.fs.FilePath; |
23 | import org.h2.store.fs.FileUtils; |
24 | |
25 | /** |
26 | * Utility methods used in combination with the MVStore. |
27 | */ |
28 | public class MVStoreTool { |
29 | |
30 | /** |
31 | * Runs this tool. |
32 | * Options are case sensitive. Supported options are: |
33 | * <table> |
     * <tr><td>[-dump &lt;fileName&gt;]</td>
     * <td>Dump the contents of the file</td></tr>
     * <tr><td>[-info &lt;fileName&gt;]</td>
     * <td>Get summary information about a file</td></tr>
     * <tr><td>[-compact &lt;fileName&gt;]</td>
     * <td>Compact a store</td></tr>
     * <tr><td>[-compress &lt;fileName&gt;]</td>
     * <td>Compact a store with compression enabled</td></tr>
42 | * </table> |
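     * For example, to get summary information about a store
     * (the file name is illustrative):
     * <pre>
     * java org.h2.mvstore.MVStoreTool -info test.mv.db
     * </pre>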
43 | * |
44 | * @param args the command line arguments |
45 | */ |
46 | public static void main(String... args) { |
47 | for (int i = 0; i < args.length; i++) { |
48 | if ("-dump".equals(args[i])) { |
49 | String fileName = args[++i]; |
50 | dump(fileName, new PrintWriter(System.out), true); |
51 | } else if ("-info".equals(args[i])) { |
52 | String fileName = args[++i]; |
53 | info(fileName, new PrintWriter(System.out)); |
54 | } else if ("-compact".equals(args[i])) { |
55 | String fileName = args[++i]; |
56 | compact(fileName, false); |
57 | } else if ("-compress".equals(args[i])) { |
58 | String fileName = args[++i]; |
59 | compact(fileName, true); |
60 | } |
61 | } |
62 | } |
63 | |
64 | /** |
65 | * Read the contents of the file and write them to system out. |
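     * For example (the file name is illustrative):
     * <pre>
     * MVStoreTool.dump("test.mv.db", true);
     * </pre>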
66 | * |
67 | * @param fileName the name of the file |
68 | * @param details whether to print details |
69 | */ |
70 | public static void dump(String fileName, boolean details) { |
71 | dump(fileName, new PrintWriter(System.out), details); |
72 | } |
73 | |
74 | /** |
     * Read the summary information of the file and write it to system out.
76 | * |
77 | * @param fileName the name of the file |
78 | */ |
79 | public static void info(String fileName) { |
80 | info(fileName, new PrintWriter(System.out)); |
81 | } |
82 | |
83 | /** |
84 | * Read the contents of the file and display them in a human-readable |
85 | * format. |
86 | * |
87 | * @param fileName the name of the file |
88 | * @param writer the print writer |
     * @param details whether to print the page details
90 | */ |
91 | public static void dump(String fileName, Writer writer, boolean details) { |
92 | PrintWriter pw = new PrintWriter(writer, true); |
93 | if (!FilePath.get(fileName).exists()) { |
94 | pw.println("File not found: " + fileName); |
95 | return; |
96 | } |
97 | long size = FileUtils.size(fileName); |
98 | pw.printf("File %s, %d bytes, %d MB\n", fileName, size, size / 1024 / 1024); |
99 | FileChannel file = null; |
100 | int blockSize = MVStore.BLOCK_SIZE; |
101 | TreeMap<Integer, Long> mapSizesTotal = |
102 | new TreeMap<Integer, Long>(); |
103 | long pageSizeTotal = 0; |
104 | try { |
105 | file = FilePath.get(fileName).open("r"); |
106 | long fileSize = file.size(); |
107 | int len = Long.toHexString(fileSize).length(); |
108 | ByteBuffer block = ByteBuffer.allocate(4096); |
109 | long pageCount = 0; |
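            // scan the file block by block, looking for file headers ('H')
            // and chunk headers ('c')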
110 | for (long pos = 0; pos < fileSize;) { |
111 | block.rewind(); |
112 | DataUtils.readFully(file, pos, block); |
113 | block.rewind(); |
114 | int headerType = block.get(); |
115 | if (headerType == 'H') { |
116 | pw.printf("%0" + len + "x fileHeader %s%n", |
117 | pos, |
118 | new String(block.array(), DataUtils.LATIN).trim()); |
119 | pos += blockSize; |
120 | continue; |
121 | } |
122 | if (headerType != 'c') { |
123 | pos += blockSize; |
124 | continue; |
125 | } |
126 | block.position(0); |
127 | Chunk c = null; |
128 | try { |
129 | c = Chunk.readChunkHeader(block, pos); |
130 | } catch (IllegalStateException e) { |
131 | pos += blockSize; |
132 | continue; |
133 | } |
134 | if (c.len <= 0) { |
135 | // not a chunk |
136 | pos += blockSize; |
137 | continue; |
138 | } |
139 | int length = c.len * MVStore.BLOCK_SIZE; |
140 | pw.printf("%n%0" + len + "x chunkHeader %s%n", |
141 | pos, c.toString()); |
142 | ByteBuffer chunk = ByteBuffer.allocate(length); |
143 | DataUtils.readFully(file, pos, chunk); |
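                // 'block' was read from the same file position as 'chunk',
                // so its position after reading the chunk header is the
                // offset of the first page within the chunk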
144 | int p = block.position(); |
145 | pos += length; |
146 | int remaining = c.pageCount; |
147 | pageCount += c.pageCount; |
148 | TreeMap<Integer, Integer> mapSizes = |
149 | new TreeMap<Integer, Integer>(); |
150 | int pageSizeSum = 0; |
151 | while (remaining > 0) { |
152 | try { |
153 | chunk.position(p); |
154 | } catch (IllegalArgumentException e) { |
155 | // too far |
156 | pw.printf("ERROR illegal position %d%n", p); |
157 | break; |
158 | } |
159 | int pageSize = chunk.getInt(); |
160 | // check value (ignored) |
161 | chunk.getShort(); |
162 | int mapId = DataUtils.readVarInt(chunk); |
163 | int entries = DataUtils.readVarInt(chunk); |
164 | int type = chunk.get(); |
165 | boolean compressed = (type & 2) != 0; |
166 | boolean node = (type & 1) != 0; |
167 | if (details) { |
168 | pw.printf( |
169 | "+%0" + len + |
170 | "x %s, map %x, %d entries, %d bytes, maxLen %x%n", |
171 | p, |
172 | (node ? "node" : "leaf") + |
173 | (compressed ? " compressed" : ""), |
174 | mapId, |
175 | node ? entries + 1 : entries, |
176 | pageSize, |
177 | DataUtils.getPageMaxLength(DataUtils.getPagePos(0, 0, pageSize, 0)) |
178 | ); |
179 | } |
180 | p += pageSize; |
181 | Integer mapSize = mapSizes.get(mapId); |
182 | if (mapSize == null) { |
183 | mapSize = 0; |
184 | } |
185 | mapSizes.put(mapId, mapSize + pageSize); |
186 | Long total = mapSizesTotal.get(mapId); |
187 | if (total == null) { |
188 | total = 0L; |
189 | } |
190 | mapSizesTotal.put(mapId, total + pageSize); |
191 | pageSizeSum += pageSize; |
192 | pageSizeTotal += pageSize; |
193 | remaining--; |
194 | long[] children = null; |
195 | long[] counts = null; |
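                    // a node page stores entries+1 child page positions,
                    // followed by entries+1 descendant entry counts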
196 | if (node) { |
197 | children = new long[entries + 1]; |
198 | for (int i = 0; i <= entries; i++) { |
199 | children[i] = chunk.getLong(); |
200 | } |
201 | counts = new long[entries + 1]; |
202 | for (int i = 0; i <= entries; i++) { |
203 | long s = DataUtils.readVarLong(chunk); |
204 | counts[i] = s; |
205 | } |
206 | } |
207 | String[] keys = new String[entries]; |
208 | if (mapId == 0 && details) { |
209 | if (!compressed) { |
210 | for (int i = 0; i < entries; i++) { |
211 | String k = StringDataType.INSTANCE.read(chunk); |
212 | keys[i] = k; |
213 | } |
214 | } |
215 | if (node) { |
216 | // meta map node |
217 | for (int i = 0; i < entries; i++) { |
218 | long cp = children[i]; |
219 | pw.printf(" %d children < %s @ " + |
220 | "chunk %x +%0" + |
221 | len + "x%n", |
222 | counts[i], |
223 | keys[i], |
224 | DataUtils.getPageChunkId(cp), |
225 | DataUtils.getPageOffset(cp)); |
226 | } |
227 | long cp = children[entries]; |
228 | pw.printf(" %d children >= %s @ chunk %x +%0" + |
229 | len + "x%n", |
230 | counts[entries], |
231 | keys.length >= entries ? null : keys[entries], |
232 | DataUtils.getPageChunkId(cp), |
233 | DataUtils.getPageOffset(cp)); |
234 | } else if (!compressed) { |
235 | // meta map leaf |
236 | String[] values = new String[entries]; |
237 | for (int i = 0; i < entries; i++) { |
238 | String v = StringDataType.INSTANCE.read(chunk); |
239 | values[i] = v; |
240 | } |
241 | for (int i = 0; i < entries; i++) { |
242 | pw.println(" " + keys[i] + |
243 | " = " + values[i]); |
244 | } |
245 | } |
246 | } else { |
247 | if (node && details) { |
248 | for (int i = 0; i <= entries; i++) { |
249 | long cp = children[i]; |
250 | pw.printf(" %d children @ chunk %x +%0" + |
251 | len + "x%n", |
252 | counts[i], |
253 | DataUtils.getPageChunkId(cp), |
254 | DataUtils.getPageOffset(cp)); |
255 | } |
256 | } |
257 | } |
258 | } |
259 | pageSizeSum = Math.max(1, pageSizeSum); |
260 | for (Integer mapId : mapSizes.keySet()) { |
261 | int percent = 100 * mapSizes.get(mapId) / pageSizeSum; |
262 | pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizes.get(mapId), percent); |
263 | } |
264 | int footerPos = chunk.limit() - Chunk.FOOTER_LENGTH; |
265 | try { |
266 | chunk.position(footerPos); |
267 | pw.printf( |
268 | "+%0" + len + "x chunkFooter %s%n", |
269 | footerPos, |
270 | new String(chunk.array(), chunk.position(), |
271 | Chunk.FOOTER_LENGTH, DataUtils.LATIN).trim()); |
272 | } catch (IllegalArgumentException e) { |
273 | // too far |
274 | pw.printf("ERROR illegal footer position %d%n", footerPos); |
275 | } |
276 | } |
277 | pw.printf("%n%0" + len + "x eof%n", fileSize); |
278 | pw.printf("\n"); |
279 | pageCount = Math.max(1, pageCount); |
280 | pw.printf("page size total: %d bytes, page count: %d, average page size: %d bytes\n", |
281 | pageSizeTotal, pageCount, pageSizeTotal / pageCount); |
282 | pageSizeTotal = Math.max(1, pageSizeTotal); |
283 | for (Integer mapId : mapSizesTotal.keySet()) { |
284 | int percent = (int) (100 * mapSizesTotal.get(mapId) / pageSizeTotal); |
285 | pw.printf("map %x: %d bytes, %d%%%n", mapId, mapSizesTotal.get(mapId), percent); |
286 | } |
287 | } catch (IOException e) { |
288 | pw.println("ERROR: " + e); |
289 | e.printStackTrace(pw); |
290 | } finally { |
291 | if (file != null) { |
292 | try { |
293 | file.close(); |
294 | } catch (IOException e) { |
295 | // ignore |
296 | } |
297 | } |
298 | } |
299 | pw.flush(); |
300 | } |
301 | |
302 | /** |
     * Read the summary information of the file and write it to system out.
304 | * |
305 | * @param fileName the name of the file |
306 | * @param writer the print writer |
307 | */ |
308 | public static void info(String fileName, Writer writer) { |
309 | PrintWriter pw = new PrintWriter(writer, true); |
310 | if (!FilePath.get(fileName).exists()) { |
311 | pw.println("File not found: " + fileName); |
312 | return; |
313 | } |
314 | long fileLength = FileUtils.size(fileName); |
315 | MVStore store = new MVStore.Builder(). |
316 | fileName(fileName). |
317 | readOnly().open(); |
318 | try { |
319 | MVMap<String, String> meta = store.getMetaMap(); |
320 | Map<String, Object> header = store.getStoreHeader(); |
321 | long fileCreated = DataUtils.readHexLong(header, "created", 0L); |
322 | TreeMap<Integer, Chunk> chunks = new TreeMap<Integer, Chunk>(); |
323 | long chunkLength = 0; |
324 | long maxLength = 0; |
325 | long maxLengthLive = 0; |
326 | long maxLengthNotEmpty = 0; |
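            // chunk metadata is stored in the meta map, one entry per chunk,
            // under keys that start with "chunk."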
327 | for (Entry<String, String> e : meta.entrySet()) { |
328 | String k = e.getKey(); |
329 | if (k.startsWith("chunk.")) { |
330 | Chunk c = Chunk.fromString(e.getValue()); |
331 | chunks.put(c.id, c); |
332 | chunkLength += c.len * MVStore.BLOCK_SIZE; |
333 | maxLength += c.maxLen; |
334 | maxLengthLive += c.maxLenLive; |
335 | if (c.maxLenLive > 0) { |
336 | maxLengthNotEmpty += c.maxLen; |
337 | } |
338 | } |
339 | } |
340 | pw.printf("Created: %s\n", formatTimestamp(fileCreated, fileCreated)); |
341 | pw.printf("Last modified: %s\n", |
342 | formatTimestamp(FileUtils.lastModified(fileName), fileCreated)); |
343 | pw.printf("File length: %d\n", fileLength); |
344 | pw.printf("The last chunk is not listed\n"); |
345 | pw.printf("Chunk length: %d\n", chunkLength); |
346 | pw.printf("Chunk count: %d\n", chunks.size()); |
347 | pw.printf("Used space: %d%%\n", getPercent(chunkLength, fileLength)); |
348 | pw.printf("Chunk fill rate: %d%%\n", maxLength == 0 ? 100 : |
349 | getPercent(maxLengthLive, maxLength)); |
350 | pw.printf("Chunk fill rate excluding empty chunks: %d%%\n", |
351 | maxLengthNotEmpty == 0 ? 100 : |
352 | getPercent(maxLengthLive, maxLengthNotEmpty)); |
353 | for (Entry<Integer, Chunk> e : chunks.entrySet()) { |
354 | Chunk c = e.getValue(); |
355 | long created = fileCreated + c.time; |
356 | pw.printf(" Chunk %d: %s, %d%% used, %d blocks", |
357 | c.id, formatTimestamp(created, fileCreated), |
358 | getPercent(c.maxLenLive, c.maxLen), |
359 | c.len |
360 | ); |
361 | if (c.maxLenLive == 0) { |
362 | pw.printf(", unused: %s", |
363 | formatTimestamp(fileCreated + c.unused, fileCreated)); |
364 | } |
365 | pw.printf("\n"); |
366 | } |
367 | pw.printf("\n"); |
368 | } catch (Exception e) { |
369 | pw.println("ERROR: " + e); |
370 | e.printStackTrace(pw); |
371 | } finally { |
372 | store.close(); |
373 | } |
374 | pw.flush(); |
375 | } |
376 | |
377 | private static String formatTimestamp(long t, long start) { |
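        // Timestamp.toString() returns "yyyy-mm-dd hh:mm:ss.fffffffff";
        // keep only the part up to whole seconds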
378 | String x = new Timestamp(t).toString(); |
379 | String s = x.substring(0, 19); |
380 | s += " (+" + ((t - start) / 1000) + " s)"; |
381 | return s; |
382 | } |
383 | |
384 | private static int getPercent(long value, long max) { |
385 | if (value == 0) { |
386 | return 0; |
387 | } else if (value == max) { |
388 | return 100; |
389 | } |
390 | return (int) (1 + 98 * value / Math.max(1, max)); |
391 | } |
392 | |
393 | /** |
     * Compact the store by creating a new file and copying the live pages
     * there. Temporarily, a file with the suffix ".tempFile" is created. This
     * file is then renamed, replacing the original file, if possible. If that
     * is not possible, the new file is first renamed to ".newFile", then the
     * old file is removed, and finally the new file is renamed. This process
     * might be interrupted, so it is better to call compactCleanUp(String)
     * before opening a store, in case this method was used.
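     * For example (a typical call sequence; the file name is illustrative):
     * <pre>
     * MVStoreTool.compactCleanUp("test.mv.db");
     * MVStoreTool.compact("test.mv.db", true);
     * </pre>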
400 | * |
401 | * @param fileName the file name |
402 | * @param compress whether to compress the data |
403 | */ |
404 | public static void compact(String fileName, boolean compress) { |
405 | String tempName = fileName + Constants.SUFFIX_MV_STORE_TEMP_FILE; |
406 | FileUtils.delete(tempName); |
407 | compact(fileName, tempName, compress); |
408 | try { |
409 | FileUtils.moveAtomicReplace(tempName, fileName); |
410 | } catch (DbException e) { |
411 | String newName = fileName + Constants.SUFFIX_MV_STORE_NEW_FILE; |
412 | FileUtils.delete(newName); |
413 | FileUtils.move(tempName, newName); |
414 | FileUtils.delete(fileName); |
415 | FileUtils.move(newName, fileName); |
416 | } |
417 | } |
418 | |
419 | /** |
     * Clean up if needed, in case a compact operation was interrupted because
     * the process was killed or due to a power failure. This will delete the
     * temporary file (if any), and in case an atomic file replacement was not
     * used, rename the new file.
424 | * |
425 | * @param fileName the file name |
426 | */ |
427 | public static void compactCleanUp(String fileName) { |
428 | String tempName = fileName + Constants.SUFFIX_MV_STORE_TEMP_FILE; |
429 | if (FileUtils.exists(tempName)) { |
430 | FileUtils.delete(tempName); |
431 | } |
432 | String newName = fileName + Constants.SUFFIX_MV_STORE_NEW_FILE; |
433 | if (FileUtils.exists(newName)) { |
434 | if (FileUtils.exists(fileName)) { |
435 | FileUtils.delete(newName); |
436 | } else { |
437 | FileUtils.move(newName, fileName); |
438 | } |
439 | } |
440 | } |
441 | |
442 | /** |
443 | * Copy all live pages from the source store to the target store. |
444 | * |
445 | * @param sourceFileName the name of the source store |
446 | * @param targetFileName the name of the target store |
447 | * @param compress whether to compress the data |
448 | */ |
449 | public static void compact(String sourceFileName, String targetFileName, boolean compress) { |
450 | MVStore source = new MVStore.Builder(). |
451 | fileName(sourceFileName). |
452 | readOnly(). |
453 | open(); |
454 | FileUtils.delete(targetFileName); |
455 | MVStore.Builder b = new MVStore.Builder(). |
456 | fileName(targetFileName); |
457 | if (compress) { |
458 | b.compress(); |
459 | } |
460 | MVStore target = b.open(); |
461 | compact(source, target); |
462 | target.close(); |
463 | source.close(); |
464 | } |
465 | |
466 | /** |
467 | * Copy all live pages from the source store to the target store. |
468 | * |
469 | * @param source the source store |
470 | * @param target the target store |
471 | */ |
472 | public static void compact(MVStore source, MVStore target) { |
473 | MVMap<String, String> sourceMeta = source.getMetaMap(); |
474 | MVMap<String, String> targetMeta = target.getMetaMap(); |
475 | for (Entry<String, String> m : sourceMeta.entrySet()) { |
476 | String key = m.getKey(); |
            // chunk.*, map.*, name.*, and root.* entries are maintained by
            // the store itself and are re-created in the target store
            if (!key.startsWith("chunk.") && !key.startsWith("map.") &&
                    !key.startsWith("name.") && !key.startsWith("root.")) {
                targetMeta.put(key, m.getValue());
            }
488 | } |
489 | for (String mapName : source.getMapNames()) { |
490 | MVMap.Builder<Object, Object> mp = |
491 | new MVMap.Builder<Object, Object>(). |
492 | keyType(new GenericDataType()). |
493 | valueType(new GenericDataType()); |
494 | MVMap<Object, Object> sourceMap = source.openMap(mapName, mp); |
495 | MVMap<Object, Object> targetMap = target.openMap(mapName, mp); |
496 | targetMap.copyFrom(sourceMap); |
497 | } |
498 | } |
499 | |
500 | /** |
     * A data type that can read any persisted data and convert it to a byte
     * array.
503 | */ |
504 | static class GenericDataType implements DataType { |
505 | |
506 | @Override |
507 | public int compare(Object a, Object b) { |
            throw DataUtils.newUnsupportedOperationException("Cannot compare");
509 | } |
510 | |
511 | @Override |
512 | public int getMemory(Object obj) { |
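            // a rough estimate of the in-memory size (not an exact value)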
513 | return obj == null ? 0 : ((byte[]) obj).length * 8; |
514 | } |
515 | |
516 | @Override |
517 | public void write(WriteBuffer buff, Object obj) { |
518 | if (obj != null) { |
519 | buff.put((byte[]) obj); |
520 | } |
521 | } |
522 | |
523 | @Override |
524 | public void write(WriteBuffer buff, Object[] obj, int len, boolean key) { |
525 | for (Object o : obj) { |
526 | write(buff, o); |
527 | } |
528 | } |
529 | |
530 | @Override |
531 | public Object read(ByteBuffer buff) { |
532 | int len = buff.remaining(); |
533 | if (len == 0) { |
534 | return null; |
535 | } |
536 | byte[] data = new byte[len]; |
537 | buff.get(data); |
538 | return data; |
539 | } |
540 | |
541 | @Override |
542 | public void read(ByteBuffer buff, Object[] obj, int len, boolean key) { |
543 | for (int i = 0; i < obj.length; i++) { |
544 | obj[i] = read(buff); |
545 | } |
546 | } |
547 | |
548 | } |
549 | |
550 | |
551 | } |