package com.renomad.minum.database;

import com.renomad.minum.state.Context;
import com.renomad.minum.logging.ILogger;
import com.renomad.minum.queue.AbstractActionQueue;
import com.renomad.minum.queue.ActionQueue;
import com.renomad.minum.utils.FileUtils;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import static com.renomad.minum.utils.Invariants.*;

/**
 * A memory-based, disk-persisted database class.
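 * <p><em>
 * A minimal usage sketch (assuming a hypothetical {@code User} class that
 * extends {@link DbData}, and an already-built {@link Context}):
 * </em></p>
 * {@snippet :
 *   Db<User> userDb = new Db<>(Path.of("out", "database", "users"), context, new User());
 *   User saved = userDb.write(new User(0L, "alice"));  // index 0 means "store as new data"
 *   boolean found = userDb.values().stream().anyMatch(x -> x.getIndex() == saved.getIndex());
 *   userDb.stop();
 * }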
 * @param <T> the type of data we'll be persisting (must extend from {@link DbData})
 */
public final class Db<T extends DbData<?>> {

    /**
     * The suffix we will apply to each database file
     */
    static final String DATABASE_FILE_SUFFIX = ".ddps";
    private final T emptyInstance;

    // some locks we use for certain operations
    private final Lock loadDataLock = new ReentrantLock();
    private final Lock writeLock = new ReentrantLock();
    private final Lock deleteLock = new ReentrantLock();

    /**
     * This is a lock that goes around the code that modifies data in the
     * map, so that it is not possible for two different modifications to
     * interleave (that is, cause race conditions).
     */
    private final Lock modificationLock = new ReentrantLock();

    /**
     * The full path to the file that contains the most-recent index
     * for this data. As we add new files, each gets its own index
     * value. When we start the program, we use this to determine
     * where to start counting for new indexes.
     */
    private final Path fullPathForIndexFile;

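    /**
     * The next index value to assign to newly-added data.
     */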
    final AtomicLong index;

    private final Path dbDirectory;
    private final AbstractActionQueue actionQueue;
    private final ILogger logger;
    private final Map<Long, T> data;
    private final FileUtils fileUtils;
    private boolean hasLoadedData;

    /**
     * Constructs an in-memory disk-persisted database.
     * @param dbDirectory this uniquely names your database, and also sets the directory
     *                    name for this data. The expected use case is to name this after
     *                    the data in question. For example, "users", or "accounts".
     * @param context used to provide important state data to several components
     * @param instance an instance of the {@link DbData} object relevant for use in this database. Note
     *                 that each database (that is, each instance of this class) focuses on just one
     *                 type of data, which must be an implementation of {@link DbData}.
     */
    public Db(Path dbDirectory, Context context, T instance) {
        this.hasLoadedData = false;
        data = new ConcurrentHashMap<>();
        actionQueue = new ActionQueue("DatabaseWriter " + dbDirectory, context).initialize();
        this.logger = context.getLogger();
        this.dbDirectory = dbDirectory;
        this.fullPathForIndexFile = dbDirectory.resolve("index" + DATABASE_FILE_SUFFIX);
        this.emptyInstance = instance;
        this.fileUtils = new FileUtils(logger, context.getConstants());
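
        // Determine the starting index: if an index file exists from a previous run,
        // resume counting from the value stored there; otherwise start at 1.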
        if (Files.exists(fullPathForIndexFile)) {
            long indexValue;
            try (var fileReader = new FileReader(fullPathForIndexFile.toFile(), StandardCharsets.UTF_8)) {
                try (BufferedReader br = new BufferedReader(fileReader)) {
                    String s = br.readLine();
                    if (s == null) throw new DbException("index file for " + dbDirectory + " returned null when reading a line from it");
                    mustBeFalse(s.isBlank(), "Unless something is terribly broken, we expect a numeric value here");
                    String trim = s.trim();
                    indexValue = Long.parseLong(trim);
                }
            } catch (Exception e) {
                throw new DbException("Exception while reading "+fullPathForIndexFile+" in Db constructor", e);
            }

            this.index = new AtomicLong(indexValue);

        } else {
            this.index = new AtomicLong(1);
        }

        actionQueue.enqueue("create directory" + dbDirectory, () -> fileUtils.makeDirectory(dbDirectory));
    }

    /**
     * This function will stop the minum.database persistence cleanly.
     * <p>
     * In order to do this, we need to wait for our threads
     * to finish their work. In particular, we
     * have offloaded our file writes to the action queue, which
     * has an internal thread for serializing all actions
     * on our minum.database.
     * </p>
     */
    public void stop() {
        actionQueue.stop();
    }

    /**
     * Similar to {@link #stop()} but gives more control over how long
     * we'll wait before crashing it closed. See {@link ActionQueue#stop(int, int)}
     */
    public void stop(int count, int sleepTime) {
        actionQueue.stop(count, sleepTime);
    }

    /**
     * Write data to the database. Use an index of 0 to store new data, and a positive
     * non-zero value to update data.
     * <p><em>
     * Example of adding new data to the database:
     * </em></p>
     * {@snippet :
     *  final var newSalt = StringUtils.generateSecureRandomString(10);
     *  final var hashedPassword = CryptoUtils.createPasswordHash(newPassword, newSalt);
     *  final var newUser = new User(0L, newUsername, hashedPassword, newSalt);
     *  userDb.write(newUser);
     * }
     * <p><em>
     * Example of updating data:
     * </em></p>
     * {@snippet :
     *  // write the updated salted password to the database
     *  final var updatedUser = new User(
     *          user().getIndex(),
     *          user().getUsername(),
     *          hashedPassword,
     *          newSalt);
     *  userDb.write(updatedUser);
     * }
     *
     * @param newData the data we are writing
     * @return the data that was written, with its index set (useful when a new index was just assigned)
     */
    public T write(T newData) {
        if (newData.getIndex() < 0) throw new DbException("Negative indexes are disallowed");
        writeLock.lock();
        try {
            // load data if needed
            if (!hasLoadedData) loadData();
            boolean newIndexCreated = false;

            modificationLock.lock();
            try {
                // *** deal with the in-memory portion ***

                // create a new index for the data, if needed
                if (newData.getIndex() == 0) {
                    newData.setIndex(index.getAndIncrement());
                    newIndexCreated = true;
                } else {
                    // if the data does not exist, and a positive non-zero
                    // index was provided, throw an exception.
                    boolean dataEntryExists = data.values().stream().anyMatch(x -> x.getIndex() == newData.getIndex());
                    if (! dataEntryExists) {
                        throw new DbException(
                                String.format("Positive indexes are only allowed when updating existing data. Index: %d",
                                        newData.getIndex()));
                    }
                }
                // if we got here, we are safe to proceed with putting the data into memory and disk
                logger.logTrace(() -> String.format("in thread %s, writing data %s", Thread.currentThread().getName(), newData));
                data.put(newData.getIndex(), newData);
            } finally {
                modificationLock.unlock();
            }

            // *** now handle the disk portion ***
            boolean finalNewIndexCreated = newIndexCreated;
            actionQueue.enqueue("persist data to disk", () -> {
                final Path fullPath = dbDirectory.resolve(newData.getIndex() + DATABASE_FILE_SUFFIX);
                logger.logTrace(() -> String.format("writing data to %s", fullPath));
                String serializedData = newData.serialize();
                mustBeFalse(serializedData == null || serializedData.isBlank(),
                        "the serialized form of data must not be blank. " +
                        "Is the serialization code written properly? Our datatype: " + emptyInstance);
                fileUtils.writeString(fullPath, serializedData);
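                // when a brand-new index was assigned, also advance the on-disk index
                // file, so a restart resumes numbering from the correct value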
                if (finalNewIndexCreated) {
                    fileUtils.writeString(fullPathForIndexFile, String.valueOf(newData.getIndex() + 1));
                }
            });

            // returning the data at this point is the most convenient
            // way users will have access to the new index of the data.
            return newData;
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Delete data
     * <p><em>Example:</em></p>
     * {@snippet :
     *  userDb.delete(user);
     * }
     * @param dataToDelete the data we are deleting
     */
    public void delete(T dataToDelete) {
        deleteLock.lock();
        try {
            // load data if needed
            if (!hasLoadedData) loadData();

            boolean hasResetIndex;
            long dataIndex;

            // deal with the in-memory portion
            modificationLock.lock();
            try {
                if (dataToDelete == null) {
                    throw new DbException("Db.delete was given a null value to delete");
                }
                dataIndex = dataToDelete.getIndex();
                if (!data.containsKey(dataIndex)) {
                    throw new DbException("no data was found with index of " + dataIndex);
                }
                logger.logTrace(() -> String.format("in thread %s, deleting data with index %d", Thread.currentThread().getName(), dataIndex));
                data.remove(dataIndex);

                // if all the data was just now deleted, we need to
                // reset the index back to 1

                if (data.isEmpty()) {
                    index.set(1);
                    hasResetIndex = true;
                } else {
                    hasResetIndex = false;
                }
            } finally {
                modificationLock.unlock();
            }

            // now handle the disk portion
            actionQueue.enqueue("delete data from disk", () -> {
                final Path fullPath = dbDirectory.resolve(dataIndex + DATABASE_FILE_SUFFIX);
                logger.logTrace(() -> String.format("deleting data at %s", fullPath));
                try {
                    mustBeTrue(fullPath.toFile().exists(), fullPath + " must already exist before deletion");
                    Files.delete(fullPath);
                    if (hasResetIndex) {
                        fileUtils.writeString(fullPathForIndexFile, String.valueOf(1));
                    }
                } catch (Exception ex) {
                    logger.logAsyncError(() -> "failed to delete file " + fullPath + " during deleteOnDisk. Exception: " + ex);
                }
            });
        } finally {
            deleteLock.unlock();
        }
    }

    /**
     * Grabs all the data from disk and loads it into the in-memory
     * data structure. This method is run when the system first loads the data.
     */
    void loadDataFromDisk() {
        if (! Files.exists(dbDirectory)) {
            logger.logDebug(() -> dbDirectory + " directory missing, adding nothing to the data list");
            return;
        }

        walkAndLoad(dbDirectory);
    }

    void walkAndLoad(Path dbDirectory) {
        // walk through all the files in this directory, collecting
        // all regular files (non-subdirectories) except for index.ddps
        try (final var pathStream = Files.walk(dbDirectory)) {
            final var listOfFiles = pathStream.filter(path ->
                    Files.isRegularFile(path) &&
                    !path.getFileName().toString().startsWith("index")
            ).toList();
            for (Path p : listOfFiles) {
                readAndDeserialize(p);
            }
        } catch (IOException e) {
            throw new DbException(e);
        }
    }

    /**
     * Carry out the process of reading data files into our in-memory structure
     * @param p the path of a particular file
     * @throws IOException if the file cannot be read
     */
    void readAndDeserialize(Path p) throws IOException {
        Path fileName = p.getFileName();
        if (fileName == null) throw new DbException("At readAndDeserialize, path " + p + " returned a null filename");
        String filename = fileName.toString();
        int startOfSuffixIndex = filename.indexOf('.');
        if (startOfSuffixIndex == -1) {
            throw new DbException("the files must have a ddps suffix, like 1.ddps. filename: " + filename);
        }
        String fileContents = Files.readString(p);
        if (fileContents.isBlank()) {
            logger.logDebug(() -> fileName + " file exists but empty, skipping");
        } else {
            try {
                @SuppressWarnings("unchecked")
                T deserializedData = (T) emptyInstance.deserialize(fileContents);
                mustBeTrue(deserializedData != null, "deserialization of " + emptyInstance +
                        " resulted in a null value. Was the serialization method implemented properly?");
                int fileNameIdentifier = Integer.parseInt(filename.substring(0, startOfSuffixIndex));
                mustBeTrue(deserializedData.getIndex() == fileNameIdentifier,
                        "The filename must correspond to the data's index. e.g. 1.ddps must have an id of 1");

                // put the data into the in-memory data structure
                data.put(deserializedData.getIndex(), deserializedData);
            } catch (Exception e) {
                throw new DbException("Failed to deserialize "+ p +" with data (\""+fileContents+"\"). Caused by: " + e);
            }
        }
    }

    /**
     * This method provides read capability for the values of a database.
     * <br>
     * The returned collection is a read-only view over the data, through {@link Collections#unmodifiableCollection(Collection)}
     *
     * <p><em>Example:</em></p>
     * {@snippet :
     *  boolean doesUserAlreadyExist(String username) {
     *      return userDb.values().stream().anyMatch(x -> x.getUsername().equals(username));
     *  }
     * }
     */
    public Collection<T> values() {
        // load data if needed
        if (!hasLoadedData) loadData();

        return Collections.unmodifiableCollection(data.values());
    }

    /**
     * This is what loads the data from disk the
     * first time someone needs it. Because it is
     * locked, only one thread can enter at
     * a time. The first one in will load the data,
     * and subsequent callers will encounter a branch which skips loading.
     */
    private void loadData() {
        loadDataLock.lock(); // block threads here if multiple are trying to get in - only one gets in at a time
        try {
            loadDataCore(hasLoadedData, this::loadDataFromDisk);
            hasLoadedData = true;
        } finally {
            loadDataLock.unlock();
        }
    }

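    /**
     * If the data has not yet been loaded, run the given loader
     * to read it from disk.
     */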
    static void loadDataCore(boolean hasLoadedData, Runnable loadDataFromDisk) {
        if (!hasLoadedData) {
            loadDataFromDisk.run();
        }
    }

}