1 | /* |
2 | * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0, |
3 | * and the EPL 1.0 (http://h2database.com/html/license.html). |
4 | * Initial Developer: H2 Group |
5 | */ |
6 | package org.h2.mvstore; |
7 | |
8 | import java.lang.Thread.UncaughtExceptionHandler; |
9 | import java.nio.ByteBuffer; |
10 | import java.util.ArrayList; |
11 | import java.util.Arrays; |
12 | import java.util.Collections; |
13 | import java.util.Comparator; |
14 | import java.util.HashMap; |
15 | import java.util.HashSet; |
16 | import java.util.Iterator; |
17 | import java.util.Map; |
18 | import java.util.Map.Entry; |
19 | import java.util.Set; |
20 | import java.util.concurrent.ConcurrentHashMap; |
21 | |
22 | import org.h2.compress.CompressDeflate; |
23 | import org.h2.compress.CompressLZF; |
24 | import org.h2.compress.Compressor; |
25 | import org.h2.mvstore.cache.CacheLongKeyLIRS; |
26 | import org.h2.mvstore.type.StringDataType; |
27 | import org.h2.mvstore.Page.PageChildren; |
28 | import org.h2.util.MathUtils; |
29 | import org.h2.util.New; |
30 | |
31 | /* |
32 | |
33 | TODO: |
34 | |
35 | Documentation |
36 | - rolling docs review: at "Metadata Map" |
37 | - better document that writes are in background thread |
38 | - better document how to do non-unique indexes |
39 | - document pluggable store and OffHeapStore |
40 | |
41 | TransactionStore: |
42 | - ability to disable the transaction log, |
43 | if there is only one connection |
44 | |
45 | MVStore: |
46 | - better and clearer memory usage accounting rules |
47 | (heap memory versus disk memory), so that even there is |
48 | never an out of memory |
49 | even for a small heap, and so that chunks |
50 | are still relatively big on average |
51 | - make sure serialization / deserialization errors don't corrupt the file |
52 | - FileStore: don't open and close when set using MVStore.Builder.fileStore |
53 | - test and possibly improve compact operation (for large dbs) |
54 | - is data kept in the stream store if the transaction is not committed? |
55 | - automated 'kill process' and 'power failure' test |
56 | - defragment (re-creating maps, specially those with small pages) |
57 | - store number of write operations per page (maybe defragment |
58 | if much different than count) |
59 | - r-tree: nearest neighbor search |
60 | - use a small object value cache (StringCache), test on Android |
61 | for default serialization |
62 | - MVStoreTool.dump should dump the data if possible; |
63 | possibly using a callback for serialization |
64 | - implement a sharded map (in one store, multiple stores) |
65 | to support concurrent updates and writes, and very large maps |
66 | - to save space when persisting very small transactions, |
67 | use a transaction log where only the deltas are stored |
68 | - serialization for lists, sets, sets, sorted sets, maps, sorted maps |
69 | - maybe rename 'rollback' to 'revert' to distinguish from transactions |
70 | - support other compression algorithms (deflate, LZ4,...) |
71 | - remove features that are not really needed; simplify the code |
72 | possibly using a separate layer or tools |
73 | (retainVersion?) |
74 | - optional pluggable checksum mechanism (per page), which |
75 | requires that everything is a page (including headers) |
76 | - rename "store" to "save", as "store" is used in "storeVersion" |
77 | - rename setStoreVersion to setDataVersion, setSchemaVersion or similar |
78 | - temporary file storage |
79 | - simple rollback method (rollback to last committed version) |
80 | - MVMap to implement SortedMap, then NavigableMap |
81 | - Test with OSGi |
82 | - storage that splits database into multiple files, |
83 | to speed up compact and allow using trim |
84 | (by truncating / deleting empty files) |
85 | - add new feature to the file system API to avoid copying data |
86 | (reads that returns a ByteBuffer instead of writing into one) |
87 | for memory mapped files and off-heap storage |
88 | - support log structured merge style operations (blind writes) |
89 | using one map per level plus bloom filter |
90 | - have a strict call order MVStore -> MVMap -> Page -> FileStore |
91 | - autocommit commits, stores, and compacts from time to time; |
92 | the background thread should wait at least 90% of the |
93 | configured write delay to store changes |
94 | - compact* should also store uncommitted changes (if there are any) |
95 | - write a LSM-tree (log structured merge tree) utility on top of the MVStore |
96 | with blind writes and/or a bloom filter that |
97 | internally uses regular maps and merge sort |
98 | - chunk metadata: maybe split into static and variable, |
99 | or use a small page size for metadata |
100 | - data type "string": maybe use prefix compression for keys |
101 | - test chunk id rollover |
102 | - feature to auto-compact from time to time and on close |
103 | - compact very small chunks |
104 | - Page: to save memory, combine keys & values into one array |
105 | (also children & counts). Maybe remove some other |
106 | fields (childrenCount for example) |
107 | - Support SortedMap for MVMap |
108 | - compact: copy whole pages (without having to open all maps) |
109 | - maybe change the length code to have lower gaps |
110 | - test with very low limits (such as: short chunks, small pages) |
111 | - maybe allow to read beyond the retention time: |
112 | when compacting, move live pages in old chunks |
113 | to a map (possibly the metadata map) - |
114 | this requires a change in the compaction code, plus |
115 | a map lookup when reading old data; also, this |
116 | old data map needs to be cleaned up somehow; |
117 | maybe using an additional timeout |
118 | - rollback of removeMap should restore the data - |
119 | which has big consequences, as the metadata map |
120 | would probably need references to the root nodes of all maps |
121 | |
122 | */ |
123 | |
124 | /** |
125 | * A persistent storage for maps. |
126 | */ |
127 | public class MVStore { |
128 | |
129 | /** |
130 | * Whether assertions are enabled. |
131 | */ |
132 | public static final boolean ASSERT = false; |
133 | |
134 | /** |
135 | * The block size (physical sector size) of the disk. The store header is |
136 | * written twice, one copy in each block, to ensure it survives a crash. |
137 | */ |
138 | static final int BLOCK_SIZE = 4 * 1024; |
139 | |
140 | private static final int FORMAT_WRITE = 1; |
141 | private static final int FORMAT_READ = 1; |
142 | |
143 | /** |
144 | * Used to mark a chunk as free, when it was detected that live bookkeeping |
145 | * is incorrect. |
146 | */ |
147 | private static final int MARKED_FREE = 10000000; |
148 | |
149 | /** |
150 | * The background thread, if any. |
151 | */ |
152 | volatile BackgroundWriterThread backgroundWriterThread; |
153 | |
154 | private volatile boolean reuseSpace = true; |
155 | |
156 | private boolean closed; |
157 | |
158 | private FileStore fileStore; |
159 | private boolean fileStoreIsProvided; |
160 | |
161 | private final int pageSplitSize; |
162 | |
163 | /** |
164 | * The page cache. The default size is 16 MB, and the average size is 2 KB. |
165 | * It is split in 16 segments. The stack move distance is 2% of the expected |
166 | * number of entries. |
167 | */ |
168 | private CacheLongKeyLIRS<Page> cache; |
169 | |
170 | /** |
171 | * The page chunk references cache. The default size is 4 MB, and the |
172 | * average size is 2 KB. It is split in 16 segments. The stack move distance |
173 | * is 2% of the expected number of entries. |
174 | */ |
175 | private CacheLongKeyLIRS<PageChildren> cacheChunkRef; |
176 | |
177 | /** |
178 | * The newest chunk. If nothing was stored yet, this field is not set. |
179 | */ |
180 | private Chunk lastChunk; |
181 | |
182 | /** |
183 | * The map of chunks. |
184 | */ |
185 | private final ConcurrentHashMap<Integer, Chunk> chunks = |
186 | new ConcurrentHashMap<Integer, Chunk>(); |
187 | |
188 | /** |
189 | * The map of temporarily freed storage space caused by freed pages. The key |
190 | * is the unsaved version, the value is the map of chunks. The maps contains |
191 | * the number of freed entries per chunk. Access is synchronized. |
192 | */ |
193 | private final ConcurrentHashMap<Long, |
194 | HashMap<Integer, Chunk>> freedPageSpace = |
195 | new ConcurrentHashMap<Long, HashMap<Integer, Chunk>>(); |
196 | |
197 | /** |
198 | * The metadata map. Write access to this map needs to be synchronized on |
199 | * the store. |
200 | */ |
201 | private MVMap<String, String> meta; |
202 | |
203 | private final ConcurrentHashMap<Integer, MVMap<?, ?>> maps = |
204 | new ConcurrentHashMap<Integer, MVMap<?, ?>>(); |
205 | |
206 | private HashMap<String, Object> fileHeader = New.hashMap(); |
207 | |
208 | private WriteBuffer writeBuffer; |
209 | |
210 | private int lastMapId; |
211 | |
212 | private int versionsToKeep = 5; |
213 | |
214 | /** |
215 | * The compression level for new pages (0 for disabled, 1 for fast, 2 for |
216 | * high). Even if disabled, the store may contain (old) compressed pages. |
217 | */ |
218 | private final int compressionLevel; |
219 | |
220 | private Compressor compressorFast; |
221 | |
222 | private Compressor compressorHigh; |
223 | |
224 | private final UncaughtExceptionHandler backgroundExceptionHandler; |
225 | |
226 | private long currentVersion; |
227 | |
228 | /** |
229 | * The version of the last stored chunk, or -1 if nothing was stored so far. |
230 | */ |
231 | private long lastStoredVersion; |
232 | |
233 | /** |
234 | * The estimated memory used by unsaved pages. This number is not accurate, |
235 | * also because it may be changed concurrently, and because temporary pages |
236 | * are counted. |
237 | */ |
238 | private int unsavedMemory; |
239 | private int autoCommitMemory; |
240 | private boolean saveNeeded; |
241 | |
242 | /** |
243 | * The time the store was created, in milliseconds since 1970. |
244 | */ |
245 | private long creationTime; |
246 | private int retentionTime; |
247 | |
248 | private long lastCommitTime; |
249 | |
250 | /** |
251 | * The earliest chunk to retain, if any. |
252 | */ |
253 | private Chunk retainChunk; |
254 | |
255 | /** |
256 | * The version of the current store operation (if any). |
257 | */ |
258 | private volatile long currentStoreVersion = -1; |
259 | |
260 | private Thread currentStoreThread; |
261 | |
262 | private volatile boolean metaChanged; |
263 | |
264 | /** |
265 | * The delay in milliseconds to automatically commit and write changes. |
266 | */ |
267 | private int autoCommitDelay; |
268 | |
269 | private int autoCompactFillRate; |
270 | private long autoCompactLastFileOpCount; |
271 | |
272 | private Object compactSync = new Object(); |
273 | |
274 | private IllegalStateException panicException; |
275 | |
276 | /** |
277 | * Create and open the store. |
278 | * |
279 | * @param config the configuration to use |
280 | * @throws IllegalStateException if the file is corrupt, or an exception |
281 | * occurred while opening |
282 | * @throws IllegalArgumentException if the directory does not exist |
283 | */ |
284 | MVStore(HashMap<String, Object> config) { |
285 | Object o = config.get("compress"); |
286 | this.compressionLevel = o == null ? 0 : (Integer) o; |
287 | String fileName = (String) config.get("fileName"); |
288 | o = config.get("pageSplitSize"); |
289 | if (o == null) { |
290 | pageSplitSize = fileName == null ? 4 * 1024 : 16 * 1024; |
291 | } else { |
292 | pageSplitSize = (Integer) o; |
293 | } |
294 | o = config.get("backgroundExceptionHandler"); |
295 | this.backgroundExceptionHandler = (UncaughtExceptionHandler) o; |
296 | meta = new MVMap<String, String>(StringDataType.INSTANCE, |
297 | StringDataType.INSTANCE); |
298 | HashMap<String, Object> c = New.hashMap(); |
299 | c.put("id", 0); |
300 | c.put("createVersion", currentVersion); |
301 | meta.init(this, c); |
302 | fileStore = (FileStore) config.get("fileStore"); |
303 | if (fileName == null && fileStore == null) { |
304 | cache = null; |
305 | cacheChunkRef = null; |
306 | return; |
307 | } |
308 | if (fileStore == null) { |
309 | fileStoreIsProvided = false; |
310 | fileStore = new FileStore(); |
311 | } else { |
312 | fileStoreIsProvided = true; |
313 | } |
314 | retentionTime = fileStore.getDefaultRetentionTime(); |
315 | boolean readOnly = config.containsKey("readOnly"); |
316 | o = config.get("cacheSize"); |
317 | int mb = o == null ? 16 : (Integer) o; |
318 | if (mb > 0) { |
319 | long maxMemoryBytes = mb * 1024L * 1024L; |
320 | int segmentCount = 16; |
321 | int stackMoveDistance = 8; |
322 | cache = new CacheLongKeyLIRS<Page>( |
323 | maxMemoryBytes, |
324 | segmentCount, stackMoveDistance); |
325 | cacheChunkRef = new CacheLongKeyLIRS<PageChildren>( |
326 | maxMemoryBytes / 4, |
327 | segmentCount, stackMoveDistance); |
328 | } |
329 | o = config.get("autoCommitBufferSize"); |
330 | int kb = o == null ? 1024 : (Integer) o; |
331 | // 19 KB memory is about 1 KB storage |
332 | autoCommitMemory = kb * 1024 * 19; |
333 | |
334 | o = config.get("autoCompactFillRate"); |
335 | autoCompactFillRate = o == null ? 50 : (Integer) o; |
336 | |
337 | char[] encryptionKey = (char[]) config.get("encryptionKey"); |
338 | try { |
339 | if (!fileStoreIsProvided) { |
340 | fileStore.open(fileName, readOnly, encryptionKey); |
341 | } |
342 | if (fileStore.size() == 0) { |
343 | creationTime = getTime(); |
344 | lastCommitTime = creationTime; |
345 | fileHeader.put("H", 2); |
346 | fileHeader.put("blockSize", BLOCK_SIZE); |
347 | fileHeader.put("format", FORMAT_WRITE); |
348 | fileHeader.put("created", creationTime); |
349 | writeFileHeader(); |
350 | } else { |
351 | readFileHeader(); |
352 | } |
353 | } catch (IllegalStateException e) { |
354 | panic(e); |
355 | } finally { |
356 | if (encryptionKey != null) { |
357 | Arrays.fill(encryptionKey, (char) 0); |
358 | } |
359 | } |
360 | lastCommitTime = getTime(); |
361 | |
362 | // setAutoCommitDelay starts the thread, but only if |
363 | // the parameter is different from the old value |
364 | o = config.get("autoCommitDelay"); |
365 | int delay = o == null ? 1000 : (Integer) o; |
366 | setAutoCommitDelay(delay); |
367 | } |
368 | |
369 | private void panic(IllegalStateException e) { |
370 | if (backgroundExceptionHandler != null) { |
371 | backgroundExceptionHandler.uncaughtException(null, e); |
372 | } |
373 | panicException = e; |
374 | closeImmediately(); |
375 | throw e; |
376 | } |
377 | |
378 | /** |
379 | * Open a store in exclusive mode. For a file-based store, the parent |
380 | * directory must already exist. |
381 | * |
382 | * @param fileName the file name (null for in-memory) |
383 | * @return the store |
384 | */ |
385 | public static MVStore open(String fileName) { |
386 | HashMap<String, Object> config = New.hashMap(); |
387 | config.put("fileName", fileName); |
388 | return new MVStore(config); |
389 | } |
390 | |
391 | /** |
392 | * Open an old, stored version of a map. |
393 | * |
394 | * @param version the version |
395 | * @param mapId the map id |
396 | * @param template the template map |
397 | * @return the read-only map |
398 | */ |
399 | @SuppressWarnings("unchecked") |
400 | <T extends MVMap<?, ?>> T openMapVersion(long version, int mapId, |
401 | MVMap<?, ?> template) { |
402 | MVMap<String, String> oldMeta = getMetaMap(version); |
403 | long rootPos = getRootPos(oldMeta, mapId); |
404 | MVMap<?, ?> m = template.openReadOnly(); |
405 | m.setRootPos(rootPos, version); |
406 | return (T) m; |
407 | } |
408 | |
409 | /** |
410 | * Open a map with the default settings. The map is automatically create if |
411 | * it does not yet exist. If a map with this name is already open, this map |
412 | * is returned. |
413 | * |
414 | * @param <K> the key type |
415 | * @param <V> the value type |
416 | * @param name the name of the map |
417 | * @return the map |
418 | */ |
419 | public <K, V> MVMap<K, V> openMap(String name) { |
420 | return openMap(name, new MVMap.Builder<K, V>()); |
421 | } |
422 | |
423 | /** |
424 | * Open a map with the given builder. The map is automatically create if it |
425 | * does not yet exist. If a map with this name is already open, this map is |
426 | * returned. |
427 | * |
428 | * @param <K> the key type |
429 | * @param <V> the value type |
430 | * @param name the name of the map |
431 | * @param builder the map builder |
432 | * @return the map |
433 | */ |
434 | public synchronized <M extends MVMap<K, V>, K, V> M openMap( |
435 | String name, MVMap.MapBuilder<M, K, V> builder) { |
436 | checkOpen(); |
437 | String x = meta.get("name." + name); |
438 | int id; |
439 | long root; |
440 | HashMap<String, Object> c; |
441 | M map; |
442 | if (x != null) { |
443 | id = DataUtils.parseHexInt(x); |
444 | @SuppressWarnings("unchecked") |
445 | M old = (M) maps.get(id); |
446 | if (old != null) { |
447 | return old; |
448 | } |
449 | map = builder.create(); |
450 | String config = meta.get(MVMap.getMapKey(id)); |
451 | c = New.hashMap(); |
452 | c.putAll(DataUtils.parseMap(config)); |
453 | c.put("id", id); |
454 | map.init(this, c); |
455 | root = getRootPos(meta, id); |
456 | } else { |
457 | c = New.hashMap(); |
458 | id = ++lastMapId; |
459 | c.put("id", id); |
460 | c.put("createVersion", currentVersion); |
461 | map = builder.create(); |
462 | map.init(this, c); |
463 | markMetaChanged(); |
464 | x = Integer.toHexString(id); |
465 | meta.put(MVMap.getMapKey(id), map.asString(name)); |
466 | meta.put("name." + name, x); |
467 | root = 0; |
468 | } |
469 | map.setRootPos(root, -1); |
470 | maps.put(id, map); |
471 | return map; |
472 | } |
473 | |
474 | /** |
475 | * Get the set of all map names. |
476 | * |
477 | * @return the set of names |
478 | */ |
479 | public synchronized Set<String> getMapNames() { |
480 | HashSet<String> set = New.hashSet(); |
481 | checkOpen(); |
482 | for (Iterator<String> it = meta.keyIterator("name."); it.hasNext();) { |
483 | String x = it.next(); |
484 | if (!x.startsWith("name.")) { |
485 | break; |
486 | } |
487 | set.add(x.substring("name.".length())); |
488 | } |
489 | return set; |
490 | } |
491 | |
492 | /** |
493 | * Get the metadata map. This data is for informational purposes only. The |
494 | * data is subject to change in future versions. |
495 | * <p> |
496 | * The data in this map should not be modified (changing system data may |
497 | * corrupt the store). If modifications are needed, they need be |
498 | * synchronized on the store. |
499 | * <p> |
500 | * The metadata map contains the following entries: |
501 | * <pre> |
502 | * chunk.{chunkId} = {chunk metadata} |
503 | * name.{name} = {mapId} |
504 | * map.{mapId} = {map metadata} |
505 | * root.{mapId} = {root position} |
506 | * setting.storeVersion = {version} |
507 | * </pre> |
508 | * |
509 | * @return the metadata map |
510 | */ |
511 | public MVMap<String, String> getMetaMap() { |
512 | checkOpen(); |
513 | return meta; |
514 | } |
515 | |
516 | private MVMap<String, String> getMetaMap(long version) { |
517 | Chunk c = getChunkForVersion(version); |
518 | DataUtils.checkArgument(c != null, "Unknown version {0}", version); |
519 | c = readChunkHeader(c.block); |
520 | MVMap<String, String> oldMeta = meta.openReadOnly(); |
521 | oldMeta.setRootPos(c.metaRootPos, version); |
522 | return oldMeta; |
523 | } |
524 | |
525 | private Chunk getChunkForVersion(long version) { |
526 | Chunk c = lastChunk; |
527 | while (true) { |
528 | if (c == null || c.version <= version) { |
529 | return c; |
530 | } |
531 | c = chunks.get(c.id - 1); |
532 | } |
533 | } |
534 | |
535 | /** |
536 | * Check whether a given map exists. |
537 | * |
538 | * @param name the map name |
539 | * @return true if it exists |
540 | */ |
541 | public boolean hasMap(String name) { |
542 | return meta.containsKey("name." + name); |
543 | } |
544 | |
545 | private void markMetaChanged() { |
546 | // changes in the metadata alone are usually not detected, as the meta |
547 | // map is changed after storing |
548 | metaChanged = true; |
549 | } |
550 | |
551 | private synchronized void readFileHeader() { |
552 | boolean validHeader = false; |
553 | // we don't know yet which chunk and version are the newest |
554 | long newestVersion = -1; |
555 | long chunkBlock = -1; |
556 | // read the first two blocks |
557 | ByteBuffer fileHeaderBlocks = fileStore.readFully(0, 2 * BLOCK_SIZE); |
558 | byte[] buff = new byte[BLOCK_SIZE]; |
559 | for (int i = 0; i <= BLOCK_SIZE; i += BLOCK_SIZE) { |
560 | fileHeaderBlocks.get(buff); |
561 | // the following can fail for various reasons |
562 | try { |
563 | String s = new String(buff, 0, BLOCK_SIZE, |
564 | DataUtils.LATIN).trim(); |
565 | HashMap<String, String> m = DataUtils.parseMap(s); |
566 | int blockSize = DataUtils.readHexInt( |
567 | m, "blockSize", BLOCK_SIZE); |
568 | if (blockSize != BLOCK_SIZE) { |
569 | throw DataUtils.newIllegalStateException( |
570 | DataUtils.ERROR_UNSUPPORTED_FORMAT, |
571 | "Block size {0} is currently not supported", |
572 | blockSize); |
573 | } |
574 | int check = DataUtils.readHexInt(m, "fletcher", 0); |
575 | m.remove("fletcher"); |
576 | s = s.substring(0, s.lastIndexOf("fletcher") - 1); |
577 | byte[] bytes = s.getBytes(DataUtils.LATIN); |
578 | int checksum = DataUtils.getFletcher32(bytes, |
579 | bytes.length); |
580 | if (check != checksum) { |
581 | continue; |
582 | } |
583 | long version = DataUtils.readHexLong(m, "version", 0); |
584 | if (version > newestVersion) { |
585 | newestVersion = version; |
586 | fileHeader.putAll(m); |
587 | chunkBlock = DataUtils.readHexLong(m, "block", 0); |
588 | creationTime = DataUtils.readHexLong(m, "created", 0); |
589 | validHeader = true; |
590 | } |
591 | } catch (Exception e) { |
592 | continue; |
593 | } |
594 | } |
595 | if (!validHeader) { |
596 | throw DataUtils.newIllegalStateException( |
597 | DataUtils.ERROR_FILE_CORRUPT, |
598 | "Store header is corrupt: {0}", fileStore); |
599 | } |
600 | long format = DataUtils.readHexLong(fileHeader, "format", 1); |
601 | if (format > FORMAT_WRITE && !fileStore.isReadOnly()) { |
602 | throw DataUtils.newIllegalStateException( |
603 | DataUtils.ERROR_UNSUPPORTED_FORMAT, |
604 | "The write format {0} is larger " + |
605 | "than the supported format {1}, " + |
606 | "and the file was not opened in read-only mode", |
607 | format, FORMAT_WRITE); |
608 | } |
609 | format = DataUtils.readHexLong(fileHeader, "formatRead", format); |
610 | if (format > FORMAT_READ) { |
611 | throw DataUtils.newIllegalStateException( |
612 | DataUtils.ERROR_UNSUPPORTED_FORMAT, |
613 | "The read format {0} is larger " + |
614 | "than the supported format {1}", |
615 | format, FORMAT_READ); |
616 | } |
617 | lastStoredVersion = -1; |
618 | chunks.clear(); |
619 | long now = System.currentTimeMillis(); |
620 | // calculate the year (doesn't have to be exact; |
621 | // we assume 365.25 days per year, * 4 = 1461) |
622 | int year = 1970 + (int) (now / (1000L * 60 * 60 * 6 * 1461)); |
623 | if (year < 2014) { |
624 | // if the year is before 2014, |
625 | // we assume the system doesn't have a real-time clock, |
626 | // and we set the creationTime to the past, so that |
627 | // existing chunks are overwritten |
628 | creationTime = now - fileStore.getDefaultRetentionTime(); |
629 | } else if (now < creationTime) { |
630 | // the system time was set to the past: |
631 | // we change the creation time |
632 | creationTime = now; |
633 | fileHeader.put("created", creationTime); |
634 | } |
635 | |
636 | Chunk footer = readChunkFooter(fileStore.size()); |
637 | if (footer != null) { |
638 | if (footer.version > newestVersion) { |
639 | newestVersion = footer.version; |
640 | chunkBlock = footer.block; |
641 | } |
642 | } |
643 | if (chunkBlock <= 0) { |
644 | // no chunk |
645 | return; |
646 | } |
647 | |
648 | // read the chunk header and footer, |
649 | // and follow the chain of next chunks |
650 | lastChunk = null; |
651 | while (true) { |
652 | Chunk header; |
653 | try { |
654 | header = readChunkHeader(chunkBlock); |
655 | } catch (Exception e) { |
656 | // invalid chunk header: ignore, but stop |
657 | break; |
658 | } |
659 | if (header.version < newestVersion) { |
660 | // we have reached the end |
661 | break; |
662 | } |
663 | footer = readChunkFooter((chunkBlock + header.len) * BLOCK_SIZE); |
664 | if (footer == null || footer.id != header.id) { |
665 | // invalid chunk footer, or the wrong one |
666 | break; |
667 | } |
668 | lastChunk = header; |
669 | newestVersion = header.version; |
670 | if (header.next == 0 || |
671 | header.next >= fileStore.size() / BLOCK_SIZE) { |
672 | // no (valid) next |
673 | break; |
674 | } |
675 | chunkBlock = header.next; |
676 | } |
677 | if (lastChunk == null) { |
678 | // no valid chunk |
679 | return; |
680 | } |
681 | lastMapId = lastChunk.mapId; |
682 | currentVersion = lastChunk.version; |
683 | setWriteVersion(currentVersion); |
684 | chunks.put(lastChunk.id, lastChunk); |
685 | meta.setRootPos(lastChunk.metaRootPos, -1); |
686 | |
687 | // load the chunk metadata: we can load in any order, |
688 | // because loading chunk metadata might recursively load another chunk |
689 | for (Iterator<String> it = meta.keyIterator("chunk."); it.hasNext();) { |
690 | String s = it.next(); |
691 | if (!s.startsWith("chunk.")) { |
692 | break; |
693 | } |
694 | s = meta.get(s); |
695 | Chunk c = Chunk.fromString(s); |
696 | if (!chunks.containsKey(c.id)) { |
697 | if (c.block == Long.MAX_VALUE) { |
698 | throw DataUtils.newIllegalStateException( |
699 | DataUtils.ERROR_FILE_CORRUPT, |
700 | "Chunk {0} is invalid", c.id); |
701 | } |
702 | chunks.put(c.id, c); |
703 | } |
704 | } |
705 | // build the free space list |
706 | for (Chunk c : chunks.values()) { |
707 | if (c.pageCountLive == 0) { |
708 | // remove this chunk in the next save operation |
709 | registerFreePage(currentVersion, c.id, 0, 0); |
710 | } |
711 | long start = c.block * BLOCK_SIZE; |
712 | int length = c.len * BLOCK_SIZE; |
713 | fileStore.markUsed(start, length); |
714 | } |
715 | } |
716 | |
717 | /** |
718 | * Try to read a chunk footer. |
719 | * |
720 | * @param end the end of the chunk |
721 | * @return the chunk, or null if not successful |
722 | */ |
723 | private Chunk readChunkFooter(long end) { |
724 | // the following can fail for various reasons |
725 | try { |
726 | // read the chunk footer of the last block of the file |
727 | ByteBuffer lastBlock = fileStore.readFully( |
728 | end - Chunk.FOOTER_LENGTH, Chunk.FOOTER_LENGTH); |
729 | byte[] buff = new byte[Chunk.FOOTER_LENGTH]; |
730 | lastBlock.get(buff); |
731 | String s = new String(buff, DataUtils.LATIN).trim(); |
732 | HashMap<String, String> m = DataUtils.parseMap(s); |
733 | int check = DataUtils.readHexInt(m, "fletcher", 0); |
734 | m.remove("fletcher"); |
735 | s = s.substring(0, s.lastIndexOf("fletcher") - 1); |
736 | byte[] bytes = s.getBytes(DataUtils.LATIN); |
737 | int checksum = DataUtils.getFletcher32(bytes, bytes.length); |
738 | if (check == checksum) { |
739 | int chunk = DataUtils.readHexInt(m, "chunk", 0); |
740 | Chunk c = new Chunk(chunk); |
741 | c.version = DataUtils.readHexLong(m, "version", 0); |
742 | c.block = DataUtils.readHexLong(m, "block", 0); |
743 | return c; |
744 | } |
745 | } catch (Exception e) { |
746 | // ignore |
747 | } |
748 | return null; |
749 | } |
750 | |
751 | private void writeFileHeader() { |
752 | StringBuilder buff = new StringBuilder(); |
753 | if (lastChunk != null) { |
754 | fileHeader.put("block", lastChunk.block); |
755 | fileHeader.put("chunk", lastChunk.id); |
756 | fileHeader.put("version", lastChunk.version); |
757 | } |
758 | DataUtils.appendMap(buff, fileHeader); |
759 | byte[] bytes = buff.toString().getBytes(DataUtils.LATIN); |
760 | int checksum = DataUtils.getFletcher32(bytes, bytes.length); |
761 | DataUtils.appendMap(buff, "fletcher", checksum); |
762 | buff.append("\n"); |
763 | bytes = buff.toString().getBytes(DataUtils.LATIN); |
764 | ByteBuffer header = ByteBuffer.allocate(2 * BLOCK_SIZE); |
765 | header.put(bytes); |
766 | header.position(BLOCK_SIZE); |
767 | header.put(bytes); |
768 | header.rewind(); |
769 | write(0, header); |
770 | } |
771 | |
772 | private void write(long pos, ByteBuffer buffer) { |
773 | try { |
774 | fileStore.writeFully(pos, buffer); |
775 | } catch (IllegalStateException e) { |
776 | panic(e); |
777 | throw e; |
778 | } |
779 | } |
780 | |
781 | /** |
782 | * Close the file and the store. Unsaved changes are written to disk first. |
783 | */ |
784 | public void close() { |
785 | if (closed) { |
786 | return; |
787 | } |
788 | if (fileStore != null && !fileStore.isReadOnly()) { |
789 | stopBackgroundThread(); |
790 | if (hasUnsavedChanges()) { |
791 | commitAndSave(); |
792 | } |
793 | } |
794 | closeStore(true); |
795 | } |
796 | |
797 | /** |
798 | * Close the file and the store, without writing anything. This will stop |
799 | * the background thread. This method ignores all errors. |
800 | */ |
801 | public void closeImmediately() { |
802 | try { |
803 | closeStore(false); |
804 | } catch (Exception e) { |
805 | if (backgroundExceptionHandler != null) { |
806 | backgroundExceptionHandler.uncaughtException(null, e); |
807 | } |
808 | } |
809 | } |
810 | |
811 | private void closeStore(boolean shrinkIfPossible) { |
812 | if (closed) { |
813 | return; |
814 | } |
815 | // can not synchronize on this yet, because |
816 | // the thread also synchronized on this, which |
817 | // could result in a deadlock |
818 | stopBackgroundThread(); |
819 | closed = true; |
820 | if (fileStore == null) { |
821 | return; |
822 | } |
823 | synchronized (this) { |
824 | if (shrinkIfPossible) { |
825 | shrinkFileIfPossible(0); |
826 | } |
827 | // release memory early - this is important when called |
828 | // because of out of memory |
829 | cache = null; |
830 | cacheChunkRef = null; |
831 | for (MVMap<?, ?> m : New.arrayList(maps.values())) { |
832 | m.close(); |
833 | } |
834 | meta = null; |
835 | chunks.clear(); |
836 | maps.clear(); |
837 | try { |
838 | if (!fileStoreIsProvided) { |
839 | fileStore.close(); |
840 | } |
841 | } finally { |
842 | fileStore = null; |
843 | } |
844 | } |
845 | } |
846 | |
847 | /** |
848 | * Whether the chunk at the given position is live. |
849 | * |
850 | * @param the chunk id |
851 | * @return true if it is live |
852 | */ |
853 | boolean isChunkLive(int chunkId) { |
854 | String s = meta.get(Chunk.getMetaKey(chunkId)); |
855 | return s != null; |
856 | } |
857 | |
858 | /** |
859 | * Get the chunk for the given position. |
860 | * |
861 | * @param pos the position |
862 | * @return the chunk |
863 | */ |
864 | private Chunk getChunk(long pos) { |
865 | Chunk c = getChunkIfFound(pos); |
866 | if (c == null) { |
867 | int chunkId = DataUtils.getPageChunkId(pos); |
868 | throw DataUtils.newIllegalStateException( |
869 | DataUtils.ERROR_FILE_CORRUPT, |
870 | "Chunk {0} not found", chunkId); |
871 | } |
872 | return c; |
873 | } |
874 | |
875 | private Chunk getChunkIfFound(long pos) { |
876 | int chunkId = DataUtils.getPageChunkId(pos); |
877 | Chunk c = chunks.get(chunkId); |
878 | if (c == null) { |
879 | checkOpen(); |
880 | if (!Thread.holdsLock(this)) { |
881 | // it could also be unsynchronized metadata |
882 | // access (if synchronization on this was forgotten) |
883 | throw DataUtils.newIllegalStateException( |
884 | DataUtils.ERROR_CHUNK_NOT_FOUND, |
885 | "Chunk {0} no longer exists", |
886 | chunkId); |
887 | } |
888 | String s = meta.get(Chunk.getMetaKey(chunkId)); |
889 | if (s == null) { |
890 | return null; |
891 | } |
892 | c = Chunk.fromString(s); |
893 | if (c.block == Long.MAX_VALUE) { |
894 | throw DataUtils.newIllegalStateException( |
895 | DataUtils.ERROR_FILE_CORRUPT, |
896 | "Chunk {0} is invalid", chunkId); |
897 | } |
898 | chunks.put(c.id, c); |
899 | } |
900 | return c; |
901 | } |
902 | |
903 | private void setWriteVersion(long version) { |
904 | for (MVMap<?, ?> map : maps.values()) { |
905 | map.setWriteVersion(version); |
906 | } |
907 | meta.setWriteVersion(version); |
908 | } |
909 | |
910 | /** |
911 | * Commit the changes. |
912 | * <p> |
913 | * For in-memory stores, this method increments the version. |
914 | * <p> |
915 | * For persistent stores, it also writes changes to disk. It does nothing if |
916 | * there are no unsaved changes, and returns the old version. It is not |
917 | * necessary to call this method when auto-commit is enabled (the default |
918 | * setting), as in this case it is automatically called from time to time or |
919 | * when enough changes have accumulated. However, it may still be called to |
920 | * flush all changes to disk. |
921 | * |
922 | * @return the new version |
923 | */ |
924 | public long commit() { |
925 | if (fileStore != null) { |
926 | return commitAndSave(); |
927 | } |
928 | long v = ++currentVersion; |
929 | setWriteVersion(v); |
930 | return v; |
931 | } |
932 | |
933 | /** |
934 | * Commit all changes and persist them to disk. This method does nothing if |
935 | * there are no unsaved changes, otherwise it increments the current version |
936 | * and stores the data (for file based stores). |
937 | * <p> |
938 | * At most one store operation may run at any time. |
939 | * |
940 | * @return the new version (incremented if there were changes) |
941 | */ |
942 | private synchronized long commitAndSave() { |
943 | if (closed) { |
944 | return currentVersion; |
945 | } |
946 | if (fileStore == null) { |
947 | throw DataUtils.newIllegalStateException( |
948 | DataUtils.ERROR_WRITING_FAILED, |
949 | "This is an in-memory store"); |
950 | } |
951 | if (currentStoreVersion >= 0) { |
952 | // store is possibly called within store, if the meta map changed |
953 | return currentVersion; |
954 | } |
955 | if (!hasUnsavedChanges()) { |
956 | return currentVersion; |
957 | } |
958 | if (fileStore.isReadOnly()) { |
959 | throw DataUtils.newIllegalStateException( |
960 | DataUtils.ERROR_WRITING_FAILED, "This store is read-only"); |
961 | } |
962 | try { |
963 | currentStoreVersion = currentVersion; |
964 | currentStoreThread = Thread.currentThread(); |
965 | return storeNow(); |
966 | } finally { |
967 | // in any case reset the current store version, |
968 | // to allow closing the store |
969 | currentStoreVersion = -1; |
970 | currentStoreThread = null; |
971 | } |
972 | } |
973 | |
974 | private long storeNow() { |
975 | try { |
976 | return storeNowTry(); |
977 | } catch (IllegalStateException e) { |
978 | panic(e); |
979 | return -1; |
980 | } |
981 | } |
982 | |
983 | private long storeNowTry() { |
984 | freeUnusedChunks(); |
985 | int currentUnsavedPageCount = unsavedMemory; |
986 | long storeVersion = currentStoreVersion; |
987 | long version = ++currentVersion; |
988 | setWriteVersion(version); |
989 | long time = getTime(); |
990 | lastCommitTime = time; |
991 | retainChunk = null; |
992 | |
993 | // the metadata of the last chunk was not stored so far, and needs to be |
994 | // set now (it's better not to update right after storing, because that |
995 | // would modify the meta map again) |
996 | int lastChunkId; |
997 | if (lastChunk == null) { |
998 | lastChunkId = 0; |
999 | } else { |
1000 | lastChunkId = lastChunk.id; |
1001 | meta.put(Chunk.getMetaKey(lastChunkId), lastChunk.asString()); |
1002 | // never go backward in time |
1003 | time = Math.max(lastChunk.time, time); |
1004 | } |
1005 | int newChunkId = lastChunkId; |
1006 | do { |
1007 | newChunkId = (newChunkId + 1) % Chunk.MAX_ID; |
1008 | } while (chunks.containsKey(newChunkId)); |
1009 | Chunk c = new Chunk(newChunkId); |
1010 | |
1011 | c.pageCount = Integer.MAX_VALUE; |
1012 | c.pageCountLive = Integer.MAX_VALUE; |
1013 | c.maxLen = Long.MAX_VALUE; |
1014 | c.maxLenLive = Long.MAX_VALUE; |
1015 | c.metaRootPos = Long.MAX_VALUE; |
1016 | c.block = Long.MAX_VALUE; |
1017 | c.len = Integer.MAX_VALUE; |
1018 | c.time = time; |
1019 | c.version = version; |
1020 | c.mapId = lastMapId; |
1021 | c.next = Long.MAX_VALUE; |
1022 | chunks.put(c.id, c); |
1023 | // force a metadata update |
1024 | meta.put(Chunk.getMetaKey(c.id), c.asString()); |
1025 | meta.remove(Chunk.getMetaKey(c.id)); |
1026 | ArrayList<MVMap<?, ?>> list = New.arrayList(maps.values()); |
1027 | ArrayList<MVMap<?, ?>> changed = New.arrayList(); |
1028 | for (MVMap<?, ?> m : list) { |
1029 | m.setWriteVersion(version); |
1030 | long v = m.getVersion(); |
1031 | if (m.getCreateVersion() > storeVersion) { |
1032 | // the map was created after storing started |
1033 | continue; |
1034 | } |
1035 | if (m.isVolatile()) { |
1036 | continue; |
1037 | } |
1038 | if (v >= 0 && v >= lastStoredVersion) { |
1039 | MVMap<?, ?> r = m.openVersion(storeVersion); |
1040 | if (r.getRoot().getPos() == 0) { |
1041 | changed.add(r); |
1042 | } |
1043 | } |
1044 | } |
1045 | applyFreedSpace(storeVersion); |
1046 | WriteBuffer buff = getWriteBuffer(); |
1047 | // need to patch the header later |
1048 | c.writeChunkHeader(buff, 0); |
1049 | int headerLength = buff.position(); |
1050 | c.pageCount = 0; |
1051 | c.pageCountLive = 0; |
1052 | c.maxLen = 0; |
1053 | c.maxLenLive = 0; |
1054 | for (MVMap<?, ?> m : changed) { |
1055 | Page p = m.getRoot(); |
1056 | String key = MVMap.getMapRootKey(m.getId()); |
1057 | if (p.getTotalCount() == 0) { |
1058 | meta.put(key, "0"); |
1059 | } else { |
1060 | p.writeUnsavedRecursive(c, buff); |
1061 | long root = p.getPos(); |
1062 | meta.put(key, Long.toHexString(root)); |
1063 | } |
1064 | } |
1065 | meta.setWriteVersion(version); |
1066 | |
1067 | Page metaRoot = meta.getRoot(); |
1068 | metaRoot.writeUnsavedRecursive(c, buff); |
1069 | |
1070 | int chunkLength = buff.position(); |
1071 | |
1072 | // add the store header and round to the next block |
1073 | int length = MathUtils.roundUpInt(chunkLength + |
1074 | Chunk.FOOTER_LENGTH, BLOCK_SIZE); |
1075 | buff.limit(length); |
1076 | |
1077 | // the length of the file that is still in use |
1078 | // (not necessarily the end of the file) |
1079 | long end = getFileLengthInUse(); |
1080 | long filePos; |
1081 | if (reuseSpace) { |
1082 | filePos = fileStore.allocate(length); |
1083 | } else { |
1084 | filePos = end; |
1085 | } |
1086 | // end is not necessarily the end of the file |
1087 | boolean storeAtEndOfFile = filePos + length >= fileStore.size(); |
1088 | |
1089 | if (!reuseSpace) { |
1090 | // we can not mark it earlier, because it |
1091 | // might have been allocated by one of the |
1092 | // removed chunks |
1093 | fileStore.markUsed(end, length); |
1094 | } |
1095 | |
1096 | c.block = filePos / BLOCK_SIZE; |
1097 | c.len = length / BLOCK_SIZE; |
1098 | c.metaRootPos = metaRoot.getPos(); |
1099 | // calculate and set the likely next position |
1100 | if (reuseSpace) { |
1101 | int predictBlocks = c.len; |
1102 | long predictedNextStart = fileStore.allocate( |
1103 | predictBlocks * BLOCK_SIZE); |
1104 | fileStore.free(predictedNextStart, predictBlocks * BLOCK_SIZE); |
1105 | c.next = predictedNextStart / BLOCK_SIZE; |
1106 | } else { |
1107 | // just after this chunk |
1108 | c.next = 0; |
1109 | } |
1110 | buff.position(0); |
1111 | c.writeChunkHeader(buff, headerLength); |
1112 | revertTemp(storeVersion); |
1113 | |
1114 | buff.position(buff.limit() - Chunk.FOOTER_LENGTH); |
1115 | buff.put(c.getFooterBytes()); |
1116 | |
1117 | buff.position(0); |
1118 | write(filePos, buff.getBuffer()); |
1119 | releaseWriteBuffer(buff); |
1120 | |
1121 | // whether we need to write the file header |
1122 | boolean needHeader = false; |
1123 | if (!storeAtEndOfFile) { |
1124 | if (lastChunk == null) { |
1125 | needHeader = true; |
1126 | } else if (lastChunk.next != c.block) { |
1127 | // the last prediction did not matched |
1128 | needHeader = true; |
1129 | } else { |
1130 | long headerVersion = DataUtils.readHexLong( |
1131 | fileHeader, "version", 0); |
1132 | if (lastChunk.version - headerVersion > 20) { |
1133 | // we write after at least 20 entries |
1134 | needHeader = true; |
1135 | } else { |
1136 | int chunkId = DataUtils.readHexInt(fileHeader, "chunk", 0); |
1137 | while (true) { |
1138 | Chunk old = chunks.get(chunkId); |
1139 | if (old == null) { |
1140 | // one of the chunks in between |
1141 | // was removed |
1142 | needHeader = true; |
1143 | break; |
1144 | } |
1145 | if (chunkId == lastChunk.id) { |
1146 | break; |
1147 | } |
1148 | chunkId++; |
1149 | } |
1150 | } |
1151 | } |
1152 | } |
1153 | |
1154 | lastChunk = c; |
1155 | if (needHeader) { |
1156 | writeFileHeader(); |
1157 | } |
1158 | if (!storeAtEndOfFile) { |
1159 | // may only shrink after the file header was written |
1160 | shrinkFileIfPossible(1); |
1161 | } |
1162 | |
1163 | for (MVMap<?, ?> m : changed) { |
1164 | Page p = m.getRoot(); |
1165 | if (p.getTotalCount() > 0) { |
1166 | p.writeEnd(); |
1167 | } |
1168 | } |
1169 | metaRoot.writeEnd(); |
1170 | |
1171 | // some pages might have been changed in the meantime (in the newest |
1172 | // version) |
1173 | unsavedMemory = Math.max(0, unsavedMemory |
1174 | - currentUnsavedPageCount); |
1175 | |
1176 | metaChanged = false; |
1177 | lastStoredVersion = storeVersion; |
1178 | |
1179 | return version; |
1180 | } |
1181 | |
1182 | private synchronized void freeUnusedChunks() { |
1183 | if (lastChunk == null || !reuseSpace) { |
1184 | return; |
1185 | } |
1186 | Set<Integer> referenced = collectReferencedChunks(); |
1187 | ArrayList<Chunk> free = New.arrayList(); |
1188 | long time = getTime(); |
1189 | for (Chunk c : chunks.values()) { |
1190 | if (!referenced.contains(c.id)) { |
1191 | free.add(c); |
1192 | } |
1193 | } |
1194 | for (Chunk c : free) { |
1195 | if (canOverwriteChunk(c, time)) { |
1196 | chunks.remove(c.id); |
1197 | markMetaChanged(); |
1198 | meta.remove(Chunk.getMetaKey(c.id)); |
1199 | long start = c.block * BLOCK_SIZE; |
1200 | int length = c.len * BLOCK_SIZE; |
1201 | fileStore.free(start, length); |
1202 | } else { |
1203 | if (c.unused == 0) { |
1204 | c.unused = time; |
1205 | meta.put(Chunk.getMetaKey(c.id), c.asString()); |
1206 | markMetaChanged(); |
1207 | } |
1208 | } |
1209 | } |
1210 | } |
1211 | |
1212 | private Set<Integer> collectReferencedChunks() { |
1213 | long testVersion = lastChunk.version; |
1214 | DataUtils.checkArgument(testVersion > 0, "Collect references on version 0"); |
1215 | long readCount = getFileStore().readCount; |
1216 | Set<Integer> referenced = New.hashSet(); |
1217 | for (Cursor<String, String> c = meta.cursor("root."); c.hasNext();) { |
1218 | String key = c.next(); |
1219 | if (!key.startsWith("root.")) { |
1220 | break; |
1221 | } |
1222 | long pos = DataUtils.parseHexLong(c.getValue()); |
1223 | if (pos == 0) { |
1224 | continue; |
1225 | } |
1226 | int mapId = DataUtils.parseHexInt(key.substring("root.".length())); |
1227 | collectReferencedChunks(referenced, mapId, pos, 0); |
1228 | } |
1229 | long pos = lastChunk.metaRootPos; |
1230 | collectReferencedChunks(referenced, 0, pos, 0); |
1231 | readCount = fileStore.readCount - readCount; |
1232 | return referenced; |
1233 | } |
1234 | |
1235 | private void collectReferencedChunks(Set<Integer> targetChunkSet, |
1236 | int mapId, long pos, int level) { |
1237 | int c = DataUtils.getPageChunkId(pos); |
1238 | targetChunkSet.add(c); |
1239 | if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { |
1240 | return; |
1241 | } |
1242 | PageChildren refs = readPageChunkReferences(mapId, pos, -1); |
1243 | if (!refs.chunkList) { |
1244 | Set<Integer> target = New.hashSet(); |
1245 | for (int i = 0; i < refs.children.length; i++) { |
1246 | long p = refs.children[i]; |
1247 | collectReferencedChunks(target, mapId, p, level + 1); |
1248 | } |
1249 | // we don't need a reference to this chunk |
1250 | target.remove(c); |
1251 | long[] children = new long[target.size()]; |
1252 | int i = 0; |
1253 | for (Integer p : target) { |
1254 | children[i++] = DataUtils.getPagePos(p, 0, 0, |
1255 | DataUtils.PAGE_TYPE_LEAF); |
1256 | } |
1257 | refs.children = children; |
1258 | refs.chunkList = true; |
1259 | if (cacheChunkRef != null) { |
1260 | cacheChunkRef.put(refs.pos, refs, refs.getMemory()); |
1261 | } |
1262 | } |
1263 | for (long p : refs.children) { |
1264 | targetChunkSet.add(DataUtils.getPageChunkId(p)); |
1265 | } |
1266 | } |
1267 | |
1268 | private PageChildren readPageChunkReferences(int mapId, long pos, int parentChunk) { |
1269 | if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { |
1270 | return null; |
1271 | } |
1272 | PageChildren r; |
1273 | if (cacheChunkRef != null) { |
1274 | r = cacheChunkRef.get(pos); |
1275 | } else { |
1276 | r = null; |
1277 | } |
1278 | if (r == null) { |
1279 | // if possible, create it from the cached page |
1280 | if (cache != null) { |
1281 | Page p = cache.get(pos); |
1282 | if (p != null) { |
1283 | r = new PageChildren(p); |
1284 | } |
1285 | } |
1286 | if (r == null) { |
1287 | // page was not cached: read the data |
1288 | Chunk c = getChunk(pos); |
1289 | long filePos = c.block * BLOCK_SIZE; |
1290 | filePos += DataUtils.getPageOffset(pos); |
1291 | if (filePos < 0) { |
1292 | throw DataUtils.newIllegalStateException( |
1293 | DataUtils.ERROR_FILE_CORRUPT, |
1294 | "Negative position {0}; p={1}, c={2}", filePos, pos, c.toString()); |
1295 | } |
1296 | long maxPos = (c.block + c.len) * BLOCK_SIZE; |
1297 | r = PageChildren.read(fileStore, pos, mapId, filePos, maxPos); |
1298 | } |
1299 | r.removeDuplicateChunkReferences(); |
1300 | if (cacheChunkRef != null) { |
1301 | cacheChunkRef.put(pos, r, r.getMemory()); |
1302 | } |
1303 | } |
1304 | if (r.children.length == 0) { |
1305 | int chunk = DataUtils.getPageChunkId(pos); |
1306 | if (chunk == parentChunk) { |
1307 | return null; |
1308 | } |
1309 | } |
1310 | return r; |
1311 | } |
1312 | |
1313 | /** |
1314 | * Get a buffer for writing. This caller must synchronize on the store |
1315 | * before calling the method and until after using the buffer. |
1316 | * |
1317 | * @return the buffer |
1318 | */ |
1319 | private WriteBuffer getWriteBuffer() { |
1320 | WriteBuffer buff; |
1321 | if (writeBuffer != null) { |
1322 | buff = writeBuffer; |
1323 | buff.clear(); |
1324 | } else { |
1325 | buff = new WriteBuffer(); |
1326 | } |
1327 | return buff; |
1328 | } |
1329 | |
1330 | /** |
1331 | * Release a buffer for writing. This caller must synchronize on the store |
1332 | * before calling the method and until after using the buffer. |
1333 | * |
1334 | * @param buff the buffer than can be re-used |
1335 | */ |
1336 | private void releaseWriteBuffer(WriteBuffer buff) { |
1337 | if (buff.capacity() <= 4 * 1024 * 1024) { |
1338 | writeBuffer = buff; |
1339 | } |
1340 | } |
1341 | |
1342 | private boolean canOverwriteChunk(Chunk c, long time) { |
1343 | if (c.time + retentionTime > time) { |
1344 | return false; |
1345 | } |
1346 | if (c.unused == 0 || c.unused + retentionTime / 2 > time) { |
1347 | return false; |
1348 | } |
1349 | Chunk r = retainChunk; |
1350 | if (r != null && c.version > r.version) { |
1351 | return false; |
1352 | } |
1353 | return true; |
1354 | } |
1355 | |
1356 | private long getTime() { |
1357 | return System.currentTimeMillis() - creationTime; |
1358 | } |
1359 | |
1360 | /** |
1361 | * Apply the freed space to the chunk metadata. The metadata is updated, but |
1362 | * completely free chunks are not removed from the set of chunks, and the |
1363 | * disk space is not yet marked as free. |
1364 | * |
1365 | * @param storeVersion apply up to the given version |
1366 | */ |
1367 | private void applyFreedSpace(long storeVersion) { |
1368 | while (true) { |
1369 | ArrayList<Chunk> modified = New.arrayList(); |
1370 | Iterator<Entry<Long, HashMap<Integer, Chunk>>> it; |
1371 | it = freedPageSpace.entrySet().iterator(); |
1372 | while (it.hasNext()) { |
1373 | Entry<Long, HashMap<Integer, Chunk>> e = it.next(); |
1374 | long v = e.getKey(); |
1375 | if (v > storeVersion) { |
1376 | continue; |
1377 | } |
1378 | HashMap<Integer, Chunk> freed = e.getValue(); |
1379 | for (Chunk f : freed.values()) { |
1380 | Chunk c = chunks.get(f.id); |
1381 | if (c == null) { |
1382 | // already removed |
1383 | continue; |
1384 | } |
1385 | // no need to synchronize, as old entries |
1386 | // are not concurrently modified |
1387 | c.maxLenLive += f.maxLenLive; |
1388 | c.pageCountLive += f.pageCountLive; |
1389 | if (c.pageCountLive < 0 && c.pageCountLive > -MARKED_FREE) { |
1390 | throw DataUtils.newIllegalStateException( |
1391 | DataUtils.ERROR_INTERNAL, |
1392 | "Corrupt page count {0}", c.pageCountLive); |
1393 | } |
1394 | if (c.maxLenLive < 0 && c.maxLenLive > -MARKED_FREE) { |
1395 | throw DataUtils.newIllegalStateException( |
1396 | DataUtils.ERROR_INTERNAL, |
1397 | "Corrupt max length {0}", c.maxLenLive); |
1398 | } |
1399 | if (c.pageCountLive <= 0 && c.maxLenLive > 0 || |
1400 | c.maxLenLive <= 0 && c.pageCountLive > 0) { |
1401 | throw DataUtils.newIllegalStateException( |
1402 | DataUtils.ERROR_INTERNAL, |
1403 | "Corrupt max length {0}", c.maxLenLive); |
1404 | } |
1405 | modified.add(c); |
1406 | } |
1407 | it.remove(); |
1408 | } |
1409 | for (Chunk c : modified) { |
1410 | meta.put(Chunk.getMetaKey(c.id), c.asString()); |
1411 | } |
1412 | if (modified.size() == 0) { |
1413 | break; |
1414 | } |
1415 | } |
1416 | } |
1417 | |
1418 | /** |
1419 | * Shrink the file if possible, and if at least a given percentage can be |
1420 | * saved. |
1421 | * |
1422 | * @param minPercent the minimum percentage to save |
1423 | */ |
1424 | private void shrinkFileIfPossible(int minPercent) { |
1425 | long end = getFileLengthInUse(); |
1426 | long fileSize = fileStore.size(); |
1427 | if (end >= fileSize) { |
1428 | return; |
1429 | } |
1430 | if (minPercent > 0 && fileSize - end < BLOCK_SIZE) { |
1431 | return; |
1432 | } |
1433 | int savedPercent = (int) (100 - (end * 100 / fileSize)); |
1434 | if (savedPercent < minPercent) { |
1435 | return; |
1436 | } |
1437 | fileStore.truncate(end); |
1438 | } |
1439 | |
1440 | /** |
1441 | * Get the position of the last used byte. |
1442 | * |
1443 | * @return the position |
1444 | */ |
1445 | private long getFileLengthInUse() { |
1446 | long size = 2 * BLOCK_SIZE; |
1447 | for (Chunk c : chunks.values()) { |
1448 | if (c.len != Integer.MAX_VALUE) { |
1449 | long x = (c.block + c.len) * BLOCK_SIZE; |
1450 | size = Math.max(size, x); |
1451 | } |
1452 | } |
1453 | return size; |
1454 | } |
1455 | |
1456 | /** |
1457 | * Check whether there are any unsaved changes. |
1458 | * |
1459 | * @return if there are any changes |
1460 | */ |
1461 | public boolean hasUnsavedChanges() { |
1462 | checkOpen(); |
1463 | if (metaChanged) { |
1464 | return true; |
1465 | } |
1466 | for (MVMap<?, ?> m : maps.values()) { |
1467 | if (!m.isClosed()) { |
1468 | long v = m.getVersion(); |
1469 | if (v >= 0 && v > lastStoredVersion) { |
1470 | return true; |
1471 | } |
1472 | } |
1473 | } |
1474 | return false; |
1475 | } |
1476 | |
1477 | private Chunk readChunkHeader(long block) { |
1478 | long p = block * BLOCK_SIZE; |
1479 | ByteBuffer buff = fileStore.readFully(p, Chunk.MAX_HEADER_LENGTH); |
1480 | return Chunk.readChunkHeader(buff, p); |
1481 | } |
1482 | |
1483 | /** |
1484 | * Compact the store by moving all live pages to new chunks. |
1485 | * |
1486 | * @return if anything was written |
1487 | */ |
1488 | public synchronized boolean compactRewriteFully() { |
1489 | checkOpen(); |
1490 | if (lastChunk == null) { |
1491 | // nothing to do |
1492 | return false; |
1493 | } |
1494 | for (MVMap<?, ?> m : maps.values()) { |
1495 | @SuppressWarnings("unchecked") |
1496 | MVMap<Object, Object> map = (MVMap<Object, Object>) m; |
1497 | Cursor<Object, Object> cursor = map.cursor(null); |
1498 | Page lastPage = null; |
1499 | while (cursor.hasNext()) { |
1500 | cursor.next(); |
1501 | Page p = cursor.getPage(); |
1502 | if (p == lastPage) { |
1503 | continue; |
1504 | } |
1505 | Object k = p.getKey(0); |
1506 | Object v = p.getValue(0); |
1507 | map.put(k, v); |
1508 | lastPage = p; |
1509 | } |
1510 | } |
1511 | commitAndSave(); |
1512 | return true; |
1513 | } |
1514 | |
1515 | /** |
1516 | * Compact by moving all chunks next to each other. |
1517 | * |
1518 | * @return if anything was written |
1519 | */ |
1520 | public synchronized boolean compactMoveChunks() { |
1521 | return compactMoveChunks(100, Long.MAX_VALUE); |
1522 | } |
1523 | |
1524 | /** |
1525 | * Compact the store by moving all chunks next to each other, if there is |
1526 | * free space between chunks. This might temporarily increase the file size. |
1527 | * Chunks are overwritten irrespective of the current retention time. Before |
1528 | * overwriting chunks and before resizing the file, syncFile() is called. |
1529 | * |
1530 | * @param targetFillRate do nothing if the file store fill rate is higher |
1531 | * than this |
1532 | * @param moveSize the number of bytes to move |
1533 | * @return if anything was written |
1534 | */ |
1535 | public synchronized boolean compactMoveChunks(int targetFillRate, long moveSize) { |
1536 | checkOpen(); |
1537 | if (lastChunk == null || !reuseSpace) { |
1538 | // nothing to do |
1539 | return false; |
1540 | } |
1541 | int oldRetentionTime = retentionTime; |
1542 | boolean oldReuse = reuseSpace; |
1543 | try { |
1544 | retentionTime = 0; |
1545 | freeUnusedChunks(); |
1546 | if (fileStore.getFillRate() > targetFillRate) { |
1547 | return false; |
1548 | } |
1549 | long start = fileStore.getFirstFree() / BLOCK_SIZE; |
1550 | ArrayList<Chunk> move = compactGetMoveBlocks(start, moveSize); |
1551 | compactMoveChunks(move); |
1552 | freeUnusedChunks(); |
1553 | storeNow(); |
1554 | } finally { |
1555 | reuseSpace = oldReuse; |
1556 | retentionTime = oldRetentionTime; |
1557 | } |
1558 | return true; |
1559 | } |
1560 | |
1561 | private ArrayList<Chunk> compactGetMoveBlocks(long startBlock, long moveSize) { |
1562 | ArrayList<Chunk> move = New.arrayList(); |
1563 | for (Chunk c : chunks.values()) { |
1564 | if (c.block > startBlock) { |
1565 | move.add(c); |
1566 | } |
1567 | } |
1568 | // sort by block |
1569 | Collections.sort(move, new Comparator<Chunk>() { |
1570 | @Override |
1571 | public int compare(Chunk o1, Chunk o2) { |
1572 | return Long.signum(o1.block - o2.block); |
1573 | } |
1574 | }); |
1575 | // find which is the last block to keep |
1576 | int count = 0; |
1577 | long size = 0; |
1578 | for (Chunk c : move) { |
1579 | long chunkSize = c.len * (long) BLOCK_SIZE; |
1580 | if (size + chunkSize > moveSize) { |
1581 | break; |
1582 | } |
1583 | size += chunkSize; |
1584 | count++; |
1585 | } |
1586 | // move the first block (so the first gap is moved), |
1587 | // and the one at the end (so the file shrinks) |
1588 | while (move.size() > count && move.size() > 1) { |
1589 | move.remove(1); |
1590 | } |
1591 | |
1592 | return move; |
1593 | } |
1594 | |
1595 | private void compactMoveChunks(ArrayList<Chunk> move) { |
1596 | for (Chunk c : move) { |
1597 | WriteBuffer buff = getWriteBuffer(); |
1598 | long start = c.block * BLOCK_SIZE; |
1599 | int length = c.len * BLOCK_SIZE; |
1600 | buff.limit(length); |
1601 | ByteBuffer readBuff = fileStore.readFully(start, length); |
1602 | Chunk.readChunkHeader(readBuff, start); |
1603 | int chunkHeaderLen = readBuff.position(); |
1604 | buff.position(chunkHeaderLen); |
1605 | buff.put(readBuff); |
1606 | long end = getFileLengthInUse(); |
1607 | fileStore.markUsed(end, length); |
1608 | fileStore.free(start, length); |
1609 | c.block = end / BLOCK_SIZE; |
1610 | c.next = 0; |
1611 | buff.position(0); |
1612 | c.writeChunkHeader(buff, chunkHeaderLen); |
1613 | buff.position(length - Chunk.FOOTER_LENGTH); |
1614 | buff.put(lastChunk.getFooterBytes()); |
1615 | buff.position(0); |
1616 | write(end, buff.getBuffer()); |
1617 | releaseWriteBuffer(buff); |
1618 | markMetaChanged(); |
1619 | meta.put(Chunk.getMetaKey(c.id), c.asString()); |
1620 | } |
1621 | |
1622 | // update the metadata (store at the end of the file) |
1623 | reuseSpace = false; |
1624 | commitAndSave(); |
1625 | |
1626 | sync(); |
1627 | |
1628 | // now re-use the empty space |
1629 | reuseSpace = true; |
1630 | for (Chunk c : move) { |
1631 | if (!chunks.containsKey(c.id)) { |
1632 | // already removed during the |
1633 | // previous store operation |
1634 | continue; |
1635 | } |
1636 | WriteBuffer buff = getWriteBuffer(); |
1637 | long start = c.block * BLOCK_SIZE; |
1638 | int length = c.len * BLOCK_SIZE; |
1639 | buff.limit(length); |
1640 | ByteBuffer readBuff = fileStore.readFully(start, length); |
1641 | Chunk.readChunkHeader(readBuff, 0); |
1642 | int chunkHeaderLen = readBuff.position(); |
1643 | buff.position(chunkHeaderLen); |
1644 | buff.put(readBuff); |
1645 | long pos = fileStore.allocate(length); |
1646 | fileStore.free(start, length); |
1647 | buff.position(0); |
1648 | c.block = pos / BLOCK_SIZE; |
1649 | c.writeChunkHeader(buff, chunkHeaderLen); |
1650 | buff.position(length - Chunk.FOOTER_LENGTH); |
1651 | buff.put(lastChunk.getFooterBytes()); |
1652 | buff.position(0); |
1653 | write(pos, buff.getBuffer()); |
1654 | releaseWriteBuffer(buff); |
1655 | markMetaChanged(); |
1656 | meta.put(Chunk.getMetaKey(c.id), c.asString()); |
1657 | } |
1658 | |
1659 | // update the metadata (within the file) |
1660 | commitAndSave(); |
1661 | sync(); |
1662 | shrinkFileIfPossible(0); |
1663 | } |
1664 | |
1665 | /** |
1666 | * Force all stored changes to be written to the storage. The default |
1667 | * implementation calls FileChannel.force(true). |
1668 | */ |
1669 | public void sync() { |
1670 | fileStore.sync(); |
1671 | } |
1672 | |
1673 | /** |
1674 | * Try to increase the fill rate by re-writing partially full chunks. Chunks |
1675 | * with a low number of live items are re-written. |
1676 | * <p> |
1677 | * If the current fill rate is higher than the target fill rate, nothing is |
1678 | * done. |
1679 | * <p> |
1680 | * Please note this method will not necessarily reduce the file size, as |
1681 | * empty chunks are not overwritten. |
1682 | * <p> |
1683 | * Only data of open maps can be moved. For maps that are not open, the old |
1684 | * chunk is still referenced. Therefore, it is recommended to open all maps |
1685 | * before calling this method. |
1686 | * |
1687 | * @param targetFillRate the minimum percentage of live entries |
1688 | * @param write the minimum number of bytes to write |
1689 | * @return if a chunk was re-written |
1690 | */ |
1691 | public boolean compact(int targetFillRate, int write) { |
1692 | if (!reuseSpace) { |
1693 | return false; |
1694 | } |
1695 | synchronized (compactSync) { |
1696 | checkOpen(); |
1697 | ArrayList<Chunk> old; |
1698 | synchronized (this) { |
1699 | old = compactGetOldChunks(targetFillRate, write); |
1700 | } |
1701 | if (old == null || old.size() == 0) { |
1702 | return false; |
1703 | } |
1704 | compactRewrite(old); |
1705 | return true; |
1706 | } |
1707 | } |
1708 | |
1709 | private ArrayList<Chunk> compactGetOldChunks(int targetFillRate, int write) { |
1710 | if (lastChunk == null) { |
1711 | // nothing to do |
1712 | return null; |
1713 | } |
1714 | |
1715 | // calculate the fill rate |
1716 | long maxLengthSum = 0; |
1717 | long maxLengthLiveSum = 0; |
1718 | |
1719 | long time = getTime(); |
1720 | |
1721 | for (Chunk c : chunks.values()) { |
1722 | // ignore young chunks, because we don't optimize those |
1723 | if (c.time + retentionTime > time) { |
1724 | continue; |
1725 | } |
1726 | maxLengthSum += c.maxLen; |
1727 | maxLengthLiveSum += c.maxLenLive; |
1728 | } |
1729 | if (maxLengthLiveSum < 0) { |
1730 | // no old data |
1731 | return null; |
1732 | } |
1733 | // the fill rate of all chunks combined |
1734 | if (maxLengthSum <= 0) { |
1735 | // avoid division by 0 |
1736 | maxLengthSum = 1; |
1737 | } |
1738 | int fillRate = (int) (100 * maxLengthLiveSum / maxLengthSum); |
1739 | if (fillRate >= targetFillRate) { |
1740 | return null; |
1741 | } |
1742 | |
1743 | // the 'old' list contains the chunks we want to free up |
1744 | ArrayList<Chunk> old = New.arrayList(); |
1745 | Chunk last = chunks.get(lastChunk.id); |
1746 | for (Chunk c : chunks.values()) { |
1747 | // only look at chunk older than the retention time |
1748 | // (it's possible to compact chunks earlier, but right |
1749 | // now we don't do that) |
1750 | if (c.time + retentionTime > time) { |
1751 | continue; |
1752 | } |
1753 | long age = last.version - c.version + 1; |
1754 | c.collectPriority = (int) (c.getFillRate() * 1000 / age); |
1755 | old.add(c); |
1756 | } |
1757 | if (old.size() == 0) { |
1758 | return null; |
1759 | } |
1760 | |
1761 | // sort the list, so the first entry should be collected first |
1762 | Collections.sort(old, new Comparator<Chunk>() { |
1763 | @Override |
1764 | public int compare(Chunk o1, Chunk o2) { |
1765 | int comp = new Integer(o1.collectPriority). |
1766 | compareTo(o2.collectPriority); |
1767 | if (comp == 0) { |
1768 | comp = new Long(o1.maxLenLive). |
1769 | compareTo(o2.maxLenLive); |
1770 | } |
1771 | return comp; |
1772 | } |
1773 | }); |
1774 | // find out up to were in the old list we need to move |
1775 | long written = 0; |
1776 | int chunkCount = 0; |
1777 | Chunk move = null; |
1778 | for (Chunk c : old) { |
1779 | if (move != null) { |
1780 | if (c.collectPriority > 0 && written > write) { |
1781 | break; |
1782 | } |
1783 | } |
1784 | written += c.maxLenLive; |
1785 | chunkCount++; |
1786 | move = c; |
1787 | } |
1788 | if (chunkCount < 1) { |
1789 | return null; |
1790 | } |
1791 | // remove the chunks we want to keep from this list |
1792 | boolean remove = false; |
1793 | for (Iterator<Chunk> it = old.iterator(); it.hasNext();) { |
1794 | Chunk c = it.next(); |
1795 | if (move == c) { |
1796 | remove = true; |
1797 | } else if (remove) { |
1798 | it.remove(); |
1799 | } |
1800 | } |
1801 | return old; |
1802 | } |
1803 | |
1804 | private void compactRewrite(ArrayList<Chunk> old) { |
1805 | HashSet<Integer> set = New.hashSet(); |
1806 | for (Chunk c : old) { |
1807 | set.add(c.id); |
1808 | } |
1809 | for (MVMap<?, ?> m : maps.values()) { |
1810 | @SuppressWarnings("unchecked") |
1811 | MVMap<Object, Object> map = (MVMap<Object, Object>) m; |
1812 | if (!map.rewrite(set)) { |
1813 | return; |
1814 | } |
1815 | } |
1816 | if (!meta.rewrite(set)) { |
1817 | return; |
1818 | } |
1819 | freeUnusedChunks(); |
1820 | commitAndSave(); |
1821 | } |
1822 | |
1823 | /** |
1824 | * Read a page. |
1825 | * |
1826 | * @param map the map |
1827 | * @param pos the page position |
1828 | * @return the page |
1829 | */ |
1830 | Page readPage(MVMap<?, ?> map, long pos) { |
1831 | if (pos == 0) { |
1832 | throw DataUtils.newIllegalStateException( |
1833 | DataUtils.ERROR_FILE_CORRUPT, "Position 0"); |
1834 | } |
1835 | Page p = cache == null ? null : cache.get(pos); |
1836 | if (p == null) { |
1837 | Chunk c = getChunk(pos); |
1838 | long filePos = c.block * BLOCK_SIZE; |
1839 | filePos += DataUtils.getPageOffset(pos); |
1840 | if (filePos < 0) { |
1841 | throw DataUtils.newIllegalStateException( |
1842 | DataUtils.ERROR_FILE_CORRUPT, |
1843 | "Negative position {0}", filePos); |
1844 | } |
1845 | long maxPos = (c.block + c.len) * BLOCK_SIZE; |
1846 | p = Page.read(fileStore, pos, map, filePos, maxPos); |
1847 | cachePage(pos, p, p.getMemory()); |
1848 | } |
1849 | return p; |
1850 | } |
1851 | |
1852 | /** |
1853 | * Remove a page. |
1854 | * |
1855 | * @param map the map the page belongs to |
1856 | * @param pos the position of the page |
1857 | * @param memory the memory usage |
1858 | */ |
1859 | void removePage(MVMap<?, ?> map, long pos, int memory) { |
1860 | // we need to keep temporary pages, |
1861 | // to support reading old versions and rollback |
1862 | if (pos == 0) { |
1863 | // the page was not yet stored: |
1864 | // just using "unsavedMemory -= memory" could result in negative |
1865 | // values, because in some cases a page is allocated, but never |
1866 | // stored, so we need to use max |
1867 | unsavedMemory = Math.max(0, unsavedMemory - memory); |
1868 | return; |
1869 | } |
1870 | |
1871 | // This could result in a cache miss if the operation is rolled back, |
1872 | // but we don't optimize for rollback. |
1873 | // We could also keep the page in the cache, as somebody |
1874 | // could still read it (reading the old version). |
1875 | if (cache != null) { |
1876 | if (DataUtils.getPageType(pos) == DataUtils.PAGE_TYPE_LEAF) { |
1877 | // keep nodes in the cache, because they are still used for |
1878 | // garbage collection |
1879 | cache.remove(pos); |
1880 | } |
1881 | } |
1882 | |
1883 | Chunk c = getChunk(pos); |
1884 | long version = currentVersion; |
1885 | if (map == meta && currentStoreVersion >= 0) { |
1886 | if (Thread.currentThread() == currentStoreThread) { |
1887 | // if the meta map is modified while storing, |
1888 | // then this freed page needs to be registered |
1889 | // with the stored chunk, so that the old chunk |
1890 | // can be re-used |
1891 | version = currentStoreVersion; |
1892 | } |
1893 | } |
1894 | registerFreePage(version, c.id, |
1895 | DataUtils.getPageMaxLength(pos), 1); |
1896 | } |
1897 | |
1898 | private void registerFreePage(long version, int chunkId, |
1899 | long maxLengthLive, int pageCount) { |
1900 | HashMap<Integer, Chunk> freed = freedPageSpace.get(version); |
1901 | if (freed == null) { |
1902 | freed = New.hashMap(); |
1903 | HashMap<Integer, Chunk> f2 = freedPageSpace.putIfAbsent(version, |
1904 | freed); |
1905 | if (f2 != null) { |
1906 | freed = f2; |
1907 | } |
1908 | } |
1909 | // synchronize, because pages could be freed concurrently |
1910 | synchronized (freed) { |
1911 | Chunk f = freed.get(chunkId); |
1912 | if (f == null) { |
1913 | f = new Chunk(chunkId); |
1914 | freed.put(chunkId, f); |
1915 | } |
1916 | f.maxLenLive -= maxLengthLive; |
1917 | f.pageCountLive -= pageCount; |
1918 | } |
1919 | } |
1920 | |
1921 | Compressor getCompressorFast() { |
1922 | if (compressorFast == null) { |
1923 | compressorFast = new CompressLZF(); |
1924 | } |
1925 | return compressorFast; |
1926 | } |
1927 | |
1928 | Compressor getCompressorHigh() { |
1929 | if (compressorHigh == null) { |
1930 | compressorHigh = new CompressDeflate(); |
1931 | } |
1932 | return compressorHigh; |
1933 | } |
1934 | |
1935 | int getCompressionLevel() { |
1936 | return compressionLevel; |
1937 | } |
1938 | |
1939 | public int getPageSplitSize() { |
1940 | return pageSplitSize; |
1941 | } |
1942 | |
1943 | public boolean getReuseSpace() { |
1944 | return reuseSpace; |
1945 | } |
1946 | |
1947 | /** |
1948 | * Whether empty space in the file should be re-used. If enabled, old data |
1949 | * is overwritten (default). If disabled, writes are appended at the end of |
1950 | * the file. |
1951 | * <p> |
1952 | * This setting is specially useful for online backup. To create an online |
1953 | * backup, disable this setting, then copy the file (starting at the |
1954 | * beginning of the file). In this case, concurrent backup and write |
1955 | * operations are possible (obviously the backup process needs to be faster |
1956 | * than the write operations). |
1957 | * |
1958 | * @param reuseSpace the new value |
1959 | */ |
1960 | public void setReuseSpace(boolean reuseSpace) { |
1961 | this.reuseSpace = reuseSpace; |
1962 | } |
1963 | |
1964 | public int getRetentionTime() { |
1965 | return retentionTime; |
1966 | } |
1967 | |
1968 | /** |
1969 | * How long to retain old, persisted chunks, in milliseconds. Chunks that |
1970 | * are older may be overwritten once they contain no live data. |
1971 | * <p> |
1972 | * The default value is 45000 (45 seconds) when using the default file |
1973 | * store. It is assumed that a file system and hard disk will flush all |
1974 | * write buffers within this time. Using a lower value might be dangerous, |
1975 | * unless the file system and hard disk flush the buffers earlier. To |
1976 | * manually flush the buffers, use |
1977 | * <code>MVStore.getFile().force(true)</code>, however please note that |
1978 | * according to various tests this does not always work as expected |
1979 | * depending on the operating system and hardware. |
1980 | * <p> |
1981 | * The retention time needs to be long enough to allow reading old chunks |
1982 | * while traversing over the entries of a map. |
1983 | * <p> |
1984 | * This setting is not persisted. |
1985 | * |
1986 | * @param ms how many milliseconds to retain old chunks (0 to overwrite them |
1987 | * as early as possible) |
1988 | */ |
1989 | public void setRetentionTime(int ms) { |
1990 | this.retentionTime = ms; |
1991 | } |
1992 | |
1993 | /** |
1994 | * How many versions to retain for in-memory stores. If not set, 5 old |
1995 | * versions are retained. |
1996 | * |
1997 | * @param count the number of versions to keep |
1998 | */ |
1999 | public void setVersionsToKeep(int count) { |
2000 | this.versionsToKeep = count; |
2001 | } |
2002 | |
2003 | /** |
2004 | * Get the oldest version to retain in memory (for in-memory stores). |
2005 | * |
2006 | * @return the version |
2007 | */ |
2008 | public long getVersionsToKeep() { |
2009 | return versionsToKeep; |
2010 | } |
2011 | |
2012 | /** |
2013 | * Get the oldest version to retain in memory, which is the manually set |
2014 | * retain version, or the current store version (whatever is older). |
2015 | * |
2016 | * @return the version |
2017 | */ |
2018 | long getOldestVersionToKeep() { |
2019 | long v = currentVersion; |
2020 | if (fileStore == null) { |
2021 | return v - versionsToKeep; |
2022 | } |
2023 | long storeVersion = currentStoreVersion; |
2024 | if (storeVersion > -1) { |
2025 | v = Math.min(v, storeVersion); |
2026 | } |
2027 | return v; |
2028 | } |
2029 | |
2030 | /** |
2031 | * Check whether all data can be read from this version. This requires that |
2032 | * all chunks referenced by this version are still available (not |
2033 | * overwritten). |
2034 | * |
2035 | * @param version the version |
2036 | * @return true if all data can be read |
2037 | */ |
2038 | private boolean isKnownVersion(long version) { |
2039 | if (version > currentVersion || version < 0) { |
2040 | return false; |
2041 | } |
2042 | if (version == currentVersion || chunks.size() == 0) { |
2043 | // no stored data |
2044 | return true; |
2045 | } |
2046 | // need to check if a chunk for this version exists |
2047 | Chunk c = getChunkForVersion(version); |
2048 | if (c == null) { |
2049 | return false; |
2050 | } |
2051 | // also, all chunks referenced by this version |
2052 | // need to be available in the file |
2053 | MVMap<String, String> oldMeta = getMetaMap(version); |
2054 | if (oldMeta == null) { |
2055 | return false; |
2056 | } |
2057 | for (Iterator<String> it = oldMeta.keyIterator("chunk."); |
2058 | it.hasNext();) { |
2059 | String chunkKey = it.next(); |
2060 | if (!chunkKey.startsWith("chunk.")) { |
2061 | break; |
2062 | } |
2063 | if (!meta.containsKey(chunkKey)) { |
2064 | return false; |
2065 | } |
2066 | } |
2067 | return true; |
2068 | } |
2069 | |
2070 | /** |
2071 | * Increment the number of unsaved pages. |
2072 | * |
2073 | * @param memory the memory usage of the page |
2074 | */ |
2075 | void registerUnsavedPage(int memory) { |
2076 | unsavedMemory += memory; |
2077 | int newValue = unsavedMemory; |
2078 | if (newValue > autoCommitMemory && autoCommitMemory > 0) { |
2079 | saveNeeded = true; |
2080 | } |
2081 | } |
2082 | |
2083 | /** |
2084 | * This method is called before writing to a map. |
2085 | * |
2086 | * @param map the map |
2087 | */ |
2088 | void beforeWrite(MVMap<?, ?> map) { |
2089 | if (saveNeeded) { |
2090 | if (map == meta) { |
2091 | // to, don't save while the metadata map is locked |
2092 | // this is to avoid deadlocks that could occur when we |
2093 | // synchronize on the store and then on the metadata map |
2094 | // TODO there should be no deadlocks possible |
2095 | return; |
2096 | } |
2097 | saveNeeded = false; |
2098 | // check again, because it could have been written by now |
2099 | if (unsavedMemory > autoCommitMemory && autoCommitMemory > 0) { |
2100 | commitAndSave(); |
2101 | } |
2102 | } |
2103 | } |
2104 | |
2105 | /** |
2106 | * Get the store version. The store version is usually used to upgrade the |
2107 | * structure of the store after upgrading the application. Initially the |
2108 | * store version is 0, until it is changed. |
2109 | * |
2110 | * @return the store version |
2111 | */ |
2112 | public int getStoreVersion() { |
2113 | checkOpen(); |
2114 | String x = meta.get("setting.storeVersion"); |
2115 | return x == null ? 0 : DataUtils.parseHexInt(x); |
2116 | } |
2117 | |
2118 | /** |
2119 | * Update the store version. |
2120 | * |
2121 | * @param version the new store version |
2122 | */ |
2123 | public synchronized void setStoreVersion(int version) { |
2124 | checkOpen(); |
2125 | markMetaChanged(); |
2126 | meta.put("setting.storeVersion", Integer.toHexString(version)); |
2127 | } |
2128 | |
2129 | /** |
2130 | * Revert to the beginning of the current version, reverting all uncommitted |
2131 | * changes. |
2132 | */ |
2133 | public void rollback() { |
2134 | rollbackTo(currentVersion); |
2135 | } |
2136 | |
2137 | /** |
2138 | * Revert to the beginning of the given version. All later changes (stored |
2139 | * or not) are forgotten. All maps that were created later are closed. A |
2140 | * rollback to a version before the last stored version is immediately |
2141 | * persisted. Rollback to version 0 means all data is removed. |
2142 | * |
2143 | * @param version the version to revert to |
2144 | */ |
2145 | public synchronized void rollbackTo(long version) { |
2146 | checkOpen(); |
2147 | if (version == 0) { |
2148 | // special case: remove all data |
2149 | for (MVMap<?, ?> m : maps.values()) { |
2150 | m.close(); |
2151 | } |
2152 | meta.clear(); |
2153 | chunks.clear(); |
2154 | if (fileStore != null) { |
2155 | fileStore.clear(); |
2156 | } |
2157 | maps.clear(); |
2158 | freedPageSpace.clear(); |
2159 | currentVersion = version; |
2160 | setWriteVersion(version); |
2161 | metaChanged = false; |
2162 | return; |
2163 | } |
2164 | DataUtils.checkArgument( |
2165 | isKnownVersion(version), |
2166 | "Unknown version {0}", version); |
2167 | for (MVMap<?, ?> m : maps.values()) { |
2168 | m.rollbackTo(version); |
2169 | } |
2170 | for (long v = currentVersion; v >= version; v--) { |
2171 | if (freedPageSpace.size() == 0) { |
2172 | break; |
2173 | } |
2174 | freedPageSpace.remove(v); |
2175 | } |
2176 | meta.rollbackTo(version); |
2177 | metaChanged = false; |
2178 | boolean loadFromFile = false; |
2179 | // get the largest chunk with a version |
2180 | // higher or equal the requested version |
2181 | Chunk removeChunksNewerThan = null; |
2182 | Chunk c = lastChunk; |
2183 | while (true) { |
2184 | if (c == null || c.version < version) { |
2185 | break; |
2186 | } |
2187 | removeChunksNewerThan = c; |
2188 | c = chunks.get(c.id - 1); |
2189 | } |
2190 | Chunk last = lastChunk; |
2191 | if (removeChunksNewerThan != null && |
2192 | last.version > removeChunksNewerThan.version) { |
2193 | revertTemp(version); |
2194 | loadFromFile = true; |
2195 | while (true) { |
2196 | last = lastChunk; |
2197 | if (last == null) { |
2198 | break; |
2199 | } else if (last.version <= removeChunksNewerThan.version) { |
2200 | break; |
2201 | } |
2202 | chunks.remove(lastChunk.id); |
2203 | long start = last.block * BLOCK_SIZE; |
2204 | int length = last.len * BLOCK_SIZE; |
2205 | fileStore.free(start, length); |
2206 | // need to overwrite the chunk, |
2207 | // so it can not be used |
2208 | WriteBuffer buff = getWriteBuffer(); |
2209 | buff.limit(length); |
2210 | // buff.clear() does not set the data |
2211 | Arrays.fill(buff.getBuffer().array(), (byte) 0); |
2212 | write(start, buff.getBuffer()); |
2213 | releaseWriteBuffer(buff); |
2214 | lastChunk = chunks.get(lastChunk.id - 1); |
2215 | } |
2216 | writeFileHeader(); |
2217 | readFileHeader(); |
2218 | } |
2219 | for (MVMap<?, ?> m : New.arrayList(maps.values())) { |
2220 | int id = m.getId(); |
2221 | if (m.getCreateVersion() >= version) { |
2222 | m.close(); |
2223 | maps.remove(id); |
2224 | } else { |
2225 | if (loadFromFile) { |
2226 | m.setRootPos(getRootPos(meta, id), -1); |
2227 | } |
2228 | } |
2229 | |
2230 | } |
2231 | // rollback might have rolled back the stored chunk metadata as well |
2232 | if (lastChunk != null) { |
2233 | c = chunks.get(lastChunk.id - 1); |
2234 | if (c != null) { |
2235 | meta.put(Chunk.getMetaKey(c.id), c.asString()); |
2236 | } |
2237 | } |
2238 | currentVersion = version; |
2239 | setWriteVersion(version); |
2240 | } |
2241 | |
2242 | private static long getRootPos(MVMap<String, String> map, int mapId) { |
2243 | String root = map.get(MVMap.getMapRootKey(mapId)); |
2244 | return root == null ? 0 : DataUtils.parseHexLong(root); |
2245 | } |
2246 | |
2247 | private void revertTemp(long storeVersion) { |
2248 | for (Iterator<Long> it = freedPageSpace.keySet().iterator(); |
2249 | it.hasNext();) { |
2250 | long v = it.next(); |
2251 | if (v > storeVersion) { |
2252 | continue; |
2253 | } |
2254 | it.remove(); |
2255 | } |
2256 | for (MVMap<?, ?> m : maps.values()) { |
2257 | m.removeUnusedOldVersions(); |
2258 | } |
2259 | } |
2260 | |
2261 | /** |
2262 | * Get the current version of the data. When a new store is created, the |
2263 | * version is 0. |
2264 | * |
2265 | * @return the version |
2266 | */ |
2267 | public long getCurrentVersion() { |
2268 | return currentVersion; |
2269 | } |
2270 | |
2271 | /** |
2272 | * Get the file store. |
2273 | * |
2274 | * @return the file store |
2275 | */ |
2276 | public FileStore getFileStore() { |
2277 | return fileStore; |
2278 | } |
2279 | |
2280 | /** |
2281 | * Get the store header. This data is for informational purposes only. The |
2282 | * data is subject to change in future versions. The data should not be |
2283 | * modified (doing so may corrupt the store). |
2284 | * |
2285 | * @return the store header |
2286 | */ |
2287 | public Map<String, Object> getStoreHeader() { |
2288 | return fileHeader; |
2289 | } |
2290 | |
2291 | private void checkOpen() { |
2292 | if (closed) { |
2293 | throw DataUtils.newIllegalStateException(DataUtils.ERROR_CLOSED, |
2294 | "This store is closed", panicException); |
2295 | } |
2296 | } |
2297 | |
2298 | /** |
2299 | * Rename a map. |
2300 | * |
2301 | * @param map the map |
2302 | * @param newName the new name |
2303 | */ |
2304 | public synchronized void renameMap(MVMap<?, ?> map, String newName) { |
2305 | checkOpen(); |
2306 | DataUtils.checkArgument(map != meta, |
2307 | "Renaming the meta map is not allowed"); |
2308 | int id = map.getId(); |
2309 | String oldName = getMapName(id); |
2310 | if (oldName.equals(newName)) { |
2311 | return; |
2312 | } |
2313 | DataUtils.checkArgument( |
2314 | !meta.containsKey("name." + newName), |
2315 | "A map named {0} already exists", newName); |
2316 | markMetaChanged(); |
2317 | String x = Integer.toHexString(id); |
2318 | meta.remove("name." + oldName); |
2319 | meta.put(MVMap.getMapKey(id), map.asString(newName)); |
2320 | meta.put("name." + newName, x); |
2321 | } |
2322 | |
2323 | /** |
2324 | * Remove a map. Please note rolling back this operation does not restore |
2325 | * the data; if you need this ability, use Map.clear(). |
2326 | * |
2327 | * @param map the map to remove |
2328 | */ |
2329 | public synchronized void removeMap(MVMap<?, ?> map) { |
2330 | checkOpen(); |
2331 | DataUtils.checkArgument(map != meta, |
2332 | "Removing the meta map is not allowed"); |
2333 | map.clear(); |
2334 | int id = map.getId(); |
2335 | String name = getMapName(id); |
2336 | markMetaChanged(); |
2337 | meta.remove(MVMap.getMapKey(id)); |
2338 | meta.remove("name." + name); |
2339 | meta.remove(MVMap.getMapRootKey(id)); |
2340 | maps.remove(id); |
2341 | } |
2342 | |
2343 | /** |
2344 | * Get the name of the given map. |
2345 | * |
2346 | * @param id the map id |
2347 | * @return the name, or null if not found |
2348 | */ |
2349 | public synchronized String getMapName(int id) { |
2350 | checkOpen(); |
2351 | String m = meta.get(MVMap.getMapKey(id)); |
2352 | return m == null ? null : DataUtils.parseMap(m).get("name"); |
2353 | } |
2354 | |
2355 | /** |
2356 | * Commit and save all changes, if there are any, and compact the store if |
2357 | * needed. |
2358 | */ |
2359 | void writeInBackground() { |
2360 | if (closed) { |
2361 | return; |
2362 | } |
2363 | |
2364 | // could also commit when there are many unsaved pages, |
2365 | // but according to a test it doesn't really help |
2366 | |
2367 | long time = getTime(); |
2368 | if (time <= lastCommitTime + autoCommitDelay) { |
2369 | return; |
2370 | } |
2371 | if (hasUnsavedChanges()) { |
2372 | try { |
2373 | commitAndSave(); |
2374 | } catch (Exception e) { |
2375 | if (backgroundExceptionHandler != null) { |
2376 | backgroundExceptionHandler.uncaughtException(null, e); |
2377 | return; |
2378 | } |
2379 | } |
2380 | } |
2381 | if (autoCompactFillRate > 0) { |
2382 | try { |
2383 | // whether there were file read or write operations since |
2384 | // the last time |
2385 | boolean fileOps; |
2386 | long fileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); |
2387 | if (autoCompactLastFileOpCount != fileOpCount) { |
2388 | fileOps = true; |
2389 | } else { |
2390 | fileOps = false; |
2391 | } |
2392 | // use a lower fill rate if there were any file operations |
2393 | int fillRate = fileOps ? autoCompactFillRate / 3 : autoCompactFillRate; |
2394 | // TODO how to avoid endless compaction if there is a bug |
2395 | // in the bookkeeping? |
2396 | compact(fillRate, autoCommitMemory); |
2397 | autoCompactLastFileOpCount = fileStore.getWriteCount() + fileStore.getReadCount(); |
2398 | } catch (Exception e) { |
2399 | if (backgroundExceptionHandler != null) { |
2400 | backgroundExceptionHandler.uncaughtException(null, e); |
2401 | } |
2402 | } |
2403 | } |
2404 | } |
2405 | |
2406 | /** |
2407 | * Set the read cache size in MB. |
2408 | * |
2409 | * @param mb the cache size in MB. |
2410 | */ |
2411 | public void setCacheSize(int mb) { |
2412 | if (cache != null) { |
2413 | cache.setMaxMemory((long) mb * 1024 * 1024); |
2414 | cache.clear(); |
2415 | } |
2416 | } |
2417 | |
2418 | public boolean isClosed() { |
2419 | return closed; |
2420 | } |
2421 | |
2422 | private void stopBackgroundThread() { |
2423 | BackgroundWriterThread t = backgroundWriterThread; |
2424 | if (t == null) { |
2425 | return; |
2426 | } |
2427 | backgroundWriterThread = null; |
2428 | if (Thread.currentThread() == t) { |
2429 | // within the thread itself - can not join |
2430 | return; |
2431 | } |
2432 | synchronized (t.sync) { |
2433 | t.sync.notifyAll(); |
2434 | } |
2435 | if (Thread.holdsLock(this)) { |
2436 | // called from storeNow: can not join, |
2437 | // because that could result in a deadlock |
2438 | return; |
2439 | } |
2440 | try { |
2441 | t.join(); |
2442 | } catch (Exception e) { |
2443 | // ignore |
2444 | } |
2445 | } |
2446 | |
2447 | /** |
2448 | * Set the maximum delay in milliseconds to auto-commit changes. |
2449 | * <p> |
2450 | * To disable auto-commit, set the value to 0. In this case, changes are |
2451 | * only committed when explicitly calling commit. |
2452 | * <p> |
2453 | * The default is 1000, meaning all changes are committed after at most one |
2454 | * second. |
2455 | * |
2456 | * @param millis the maximum delay |
2457 | */ |
2458 | public void setAutoCommitDelay(int millis) { |
2459 | if (autoCommitDelay == millis) { |
2460 | return; |
2461 | } |
2462 | autoCommitDelay = millis; |
2463 | if (fileStore == null || fileStore.isReadOnly()) { |
2464 | return; |
2465 | } |
2466 | stopBackgroundThread(); |
2467 | // start the background thread if needed |
2468 | if (millis > 0) { |
2469 | int sleep = Math.max(1, millis / 10); |
2470 | BackgroundWriterThread t = |
2471 | new BackgroundWriterThread(this, sleep, |
2472 | fileStore.toString()); |
2473 | t.start(); |
2474 | backgroundWriterThread = t; |
2475 | } |
2476 | } |
2477 | |
2478 | /** |
2479 | * Get the auto-commit delay. |
2480 | * |
2481 | * @return the delay in milliseconds, or 0 if auto-commit is disabled. |
2482 | */ |
2483 | public int getAutoCommitDelay() { |
2484 | return autoCommitDelay; |
2485 | } |
2486 | |
2487 | /** |
2488 | * Get the maximum memory (in bytes) used for unsaved pages. If this number |
2489 | * is exceeded, unsaved changes are stored to disk. |
2490 | * |
2491 | * @return the memory in bytes |
2492 | */ |
2493 | public int getAutoCommitMemory() { |
2494 | return autoCommitMemory; |
2495 | } |
2496 | |
2497 | /** |
2498 | * Get the estimated memory (in bytes) of unsaved data. If the value exceeds |
2499 | * the auto-commit memory, the changes are committed. |
2500 | * <p> |
2501 | * The returned value is an estimation only. |
2502 | * |
2503 | * @return the memory in bytes |
2504 | */ |
2505 | public int getUnsavedMemory() { |
2506 | return unsavedMemory; |
2507 | } |
2508 | |
2509 | /** |
2510 | * Put the page in the cache. |
2511 | * |
2512 | * @param pos the page position |
2513 | * @param page the page |
2514 | * @param memory the memory used |
2515 | */ |
2516 | void cachePage(long pos, Page page, int memory) { |
2517 | if (cache != null) { |
2518 | cache.put(pos, page, memory); |
2519 | } |
2520 | } |
2521 | |
2522 | /** |
2523 | * Get the amount of memory used for caching, in MB. |
2524 | * |
2525 | * @return the amount of memory used for caching |
2526 | */ |
2527 | public int getCacheSizeUsed() { |
2528 | if (cache == null) { |
2529 | return 0; |
2530 | } |
2531 | return (int) (cache.getUsedMemory() / 1024 / 1024); |
2532 | } |
2533 | |
2534 | /** |
2535 | * Get the maximum cache size, in MB. |
2536 | * |
2537 | * @return the cache size |
2538 | */ |
2539 | public int getCacheSize() { |
2540 | if (cache == null) { |
2541 | return 0; |
2542 | } |
2543 | return (int) (cache.getMaxMemory() / 1024 / 1024); |
2544 | } |
2545 | |
2546 | /** |
2547 | * Get the cache. |
2548 | * |
2549 | * @return the cache |
2550 | */ |
2551 | public CacheLongKeyLIRS<Page> getCache() { |
2552 | return cache; |
2553 | } |
2554 | |
2555 | /** |
2556 | * A background writer thread to automatically store changes from time to |
2557 | * time. |
2558 | */ |
2559 | private static class BackgroundWriterThread extends Thread { |
2560 | |
2561 | public final Object sync = new Object(); |
2562 | private final MVStore store; |
2563 | private final int sleep; |
2564 | |
2565 | BackgroundWriterThread(MVStore store, int sleep, String fileStoreName) { |
2566 | super("MVStore background writer " + fileStoreName); |
2567 | this.store = store; |
2568 | this.sleep = sleep; |
2569 | setDaemon(true); |
2570 | } |
2571 | |
2572 | @Override |
2573 | public void run() { |
2574 | while (true) { |
2575 | Thread t = store.backgroundWriterThread; |
2576 | if (t == null) { |
2577 | break; |
2578 | } |
2579 | synchronized (sync) { |
2580 | try { |
2581 | sync.wait(sleep); |
2582 | } catch (InterruptedException e) { |
2583 | continue; |
2584 | } |
2585 | } |
2586 | store.writeInBackground(); |
2587 | } |
2588 | } |
2589 | |
2590 | } |
2591 | |
2592 | /** |
2593 | * A builder for an MVStore. |
2594 | */ |
2595 | public static class Builder { |
2596 | |
2597 | private final HashMap<String, Object> config = New.hashMap(); |
2598 | |
2599 | private Builder set(String key, Object value) { |
2600 | config.put(key, value); |
2601 | return this; |
2602 | } |
2603 | |
2604 | /** |
2605 | * Disable auto-commit, by setting the auto-commit delay and auto-commit |
2606 | * buffer size to 0. |
2607 | * |
2608 | * @return this |
2609 | */ |
2610 | public Builder autoCommitDisabled() { |
2611 | // we have a separate config option so that |
2612 | // no thread is started if the write delay is 0 |
2613 | // (if we only had a setter in the MVStore, |
2614 | // the thread would need to be started in any case) |
2615 | set("autoCommitBufferSize", 0); |
2616 | return set("autoCommitDelay", 0); |
2617 | } |
2618 | |
2619 | /** |
2620 | * Set the size of the write buffer, in KB disk space (for file-based |
2621 | * stores). Unless auto-commit is disabled, changes are automatically |
2622 | * saved if there are more than this amount of changes. |
2623 | * <p> |
2624 | * The default is 1024 KB. |
2625 | * <p> |
2626 | * When the value is set to 0 or lower, data is not automatically |
2627 | * stored. |
2628 | * |
2629 | * @param kb the write buffer size, in kilobytes |
2630 | * @return this |
2631 | */ |
2632 | public Builder autoCommitBufferSize(int kb) { |
2633 | return set("autoCommitBufferSize", kb); |
2634 | } |
2635 | |
2636 | /** |
2637 | * Set the auto-compact target fill rate. If the average fill rate (the |
2638 | * percentage of the storage space that contains active data) of the |
2639 | * chunks is lower, then the chunks with a low fill rate are re-written. |
2640 | * Also, if the percentage of empty space between chunks is higher than |
2641 | * this value, then chunks at the end of the file are moved. Compaction |
2642 | * stops if the target fill rate is reached. |
2643 | * <p> |
2644 | * The default value is 50 (50%). The value 0 disables auto-compacting. |
2645 | * <p> |
2646 | * |
2647 | * @param percent the target fill rate |
2648 | * @return this |
2649 | */ |
2650 | public Builder autoCompactFillRate(int percent) { |
2651 | return set("autoCompactFillRate", percent); |
2652 | } |
2653 | |
2654 | /** |
2655 | * Use the following file name. If the file does not exist, it is |
2656 | * automatically created. The parent directory already must exist. |
2657 | * |
2658 | * @param fileName the file name |
2659 | * @return this |
2660 | */ |
2661 | public Builder fileName(String fileName) { |
2662 | return set("fileName", fileName); |
2663 | } |
2664 | |
2665 | /** |
2666 | * Encrypt / decrypt the file using the given password. This method has |
2667 | * no effect for in-memory stores. The password is passed as a |
2668 | * char array so that it can be cleared as soon as possible. Please note |
2669 | * there is still a small risk that password stays in memory (due to |
2670 | * Java garbage collection). Also, the hashed encryption key is kept in |
2671 | * memory as long as the file is open. |
2672 | * |
2673 | * @param password the password |
2674 | * @return this |
2675 | */ |
2676 | public Builder encryptionKey(char[] password) { |
2677 | return set("encryptionKey", password); |
2678 | } |
2679 | |
2680 | /** |
2681 | * Open the file in read-only mode. In this case, a shared lock will be |
2682 | * acquired to ensure the file is not concurrently opened in write mode. |
2683 | * <p> |
2684 | * If this option is not used, the file is locked exclusively. |
2685 | * <p> |
2686 | * Please note a store may only be opened once in every JVM (no matter |
2687 | * whether it is opened in read-only or read-write mode), because each |
2688 | * file may be locked only once in a process. |
2689 | * |
2690 | * @return this |
2691 | */ |
2692 | public Builder readOnly() { |
2693 | return set("readOnly", 1); |
2694 | } |
2695 | |
2696 | /** |
2697 | * Set the read cache size in MB. The default is 16 MB. |
2698 | * |
2699 | * @param mb the cache size in megabytes |
2700 | * @return this |
2701 | */ |
2702 | public Builder cacheSize(int mb) { |
2703 | return set("cacheSize", mb); |
2704 | } |
2705 | |
2706 | /** |
2707 | * Compress data before writing using the LZF algorithm. This will save |
2708 | * about 50% of the disk space, but will slow down read and write |
2709 | * operations slightly. |
2710 | * <p> |
2711 | * This setting only affects writes; it is not necessary to enable |
2712 | * compression when reading, even if compression was enabled when |
2713 | * writing. |
2714 | * |
2715 | * @return this |
2716 | */ |
2717 | public Builder compress() { |
2718 | return set("compress", 1); |
2719 | } |
2720 | |
2721 | /** |
2722 | * Compress data before writing using the Deflate algorithm. This will |
2723 | * save more disk space, but will slow down read and write operations |
2724 | * quite a bit. |
2725 | * <p> |
2726 | * This setting only affects writes; it is not necessary to enable |
2727 | * compression when reading, even if compression was enabled when |
2728 | * writing. |
2729 | * |
2730 | * @return this |
2731 | */ |
2732 | public Builder compressHigh() { |
2733 | return set("compress", 2); |
2734 | } |
2735 | |
2736 | /** |
2737 | * Set the amount of memory a page should contain at most, in bytes, |
2738 | * before it is split. The default is 16 KB for persistent stores and 4 |
2739 | * KB for in-memory stores. This is not a limit in the page size, as |
2740 | * pages with one entry can get larger. It is just the point where pages |
2741 | * that contain more than one entry are split. |
2742 | * |
2743 | * @param pageSplitSize the page size |
2744 | * @return this |
2745 | */ |
2746 | public Builder pageSplitSize(int pageSplitSize) { |
2747 | return set("pageSplitSize", pageSplitSize); |
2748 | } |
2749 | |
2750 | /** |
2751 | * Set the listener to be used for exceptions that occur when writing in |
2752 | * the background thread. |
2753 | * |
2754 | * @param exceptionHandler the handler |
2755 | * @return this |
2756 | */ |
2757 | public Builder backgroundExceptionHandler( |
2758 | Thread.UncaughtExceptionHandler exceptionHandler) { |
2759 | return set("backgroundExceptionHandler", exceptionHandler); |
2760 | } |
2761 | |
2762 | /** |
2763 | * Use the provided file store instead of the default one. |
2764 | * <p> |
2765 | * File stores passed in this way need to be open. They are not closed |
2766 | * when closing the store. |
2767 | * <p> |
2768 | * Please note that any kind of store (including an off-heap store) is |
2769 | * considered a "persistence", while an "in-memory store" means objects |
2770 | * are not persisted and fully kept in the JVM heap. |
2771 | * |
2772 | * @param store the file store |
2773 | * @return this |
2774 | */ |
2775 | public Builder fileStore(FileStore store) { |
2776 | return set("fileStore", store); |
2777 | } |
2778 | |
2779 | /** |
2780 | * Open the store. |
2781 | * |
2782 | * @return the opened store |
2783 | */ |
2784 | public MVStore open() { |
2785 | return new MVStore(config); |
2786 | } |
2787 | |
2788 | @Override |
2789 | public String toString() { |
2790 | return DataUtils.appendMap(new StringBuilder(), config).toString(); |
2791 | } |
2792 | |
2793 | /** |
2794 | * Read the configuration from a string. |
2795 | * |
2796 | * @param s the string representation |
2797 | * @return the builder |
2798 | */ |
2799 | public static Builder fromString(String s) { |
2800 | HashMap<String, String> config = DataUtils.parseMap(s); |
2801 | Builder builder = new Builder(); |
2802 | builder.config.putAll(config); |
2803 | return builder; |
2804 | } |
2805 | |
2806 | } |
2807 | |
2808 | } |