1 // Copyright (c) 2011 The LevelDB Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. See the AUTHORS file for names of contributors.
8 #include "db/db_impl.h"
9 #include "db/version_set.h"
10 #include "leveldb/cache.h"
11 #include "leveldb/db.h"
12 #include "leveldb/env.h"
13 #include "leveldb/write_batch.h"
14 #include "port/port.h"
15 #include "util/crc32c.h"
16 #include "util/histogram.h"
17 #include "util/mutexlock.h"
18 #include "util/random.h"
19 #include "util/testutil.h"
21 // Comma-separated list of operations to run in the specified order
23 // fillseq -- write N values in sequential key order in async mode
24 // fillrandom -- write N values in random key order in async mode
25 // overwrite -- overwrite N values in random key order in async mode
26 // fillsync -- write N/100 values in random key order in sync mode
27 // fill100K -- write N/1000 100K values in random order in async mode
28 // deleteseq -- delete N keys in sequential order
29 // deleterandom -- delete N keys in random order
30 // readseq -- read N times sequentially
31 // readreverse -- read N times in reverse order
32 // readrandom -- read N times in random order
33 // readmissing -- read N missing keys in random order
34 // readhot -- read N times in random order from 1% section of DB
35 // seekrandom -- N random seeks
36 // crc32c -- repeated crc32c of 4K of data
37 // acquireload -- load N*1000 times
39 // compact -- Compact the entire DB
40 // stats -- Print DB stats
41 // sstables -- Print sstable info
42 // heapprofile -- Dump a heap profile (if supported by this port)
// Default set of benchmarks to run, in the order given (see the list of
// benchmark names above).
// NOTE(review): the full comma-separated default list is not visible in
// this excerpt; only the trailing "readrandom" entry is shown.
43 static const char* FLAGS_benchmarks =
49 "readrandom," // Extra run to allow previous compactions to quiesce
63 // Number of key/values to place in database
64 static int FLAGS_num = 1000000;
66 // Number of read operations to do. If negative, do FLAGS_num reads.
67 static int FLAGS_reads = -1;
69 // Number of concurrent threads to run.
70 static int FLAGS_threads = 1;
// Size of each value
73 static int FLAGS_value_size = 100;
75 // Arrange to generate values that shrink to this fraction of
76 // their original size after compression
77 static double FLAGS_compression_ratio = 0.5;
79 // Print histogram of operation timings
80 static bool FLAGS_histogram = false;
82 // Number of bytes to buffer in memtable before compacting
83 // (initialized to default value by "main")
84 static int FLAGS_write_buffer_size = 0;
86 // Number of bytes to use as a cache of uncompressed data.
87 // Negative means use default settings.
88 static int FLAGS_cache_size = -1;
90 // Maximum number of files to keep open at the same time (use default if == 0)
91 static int FLAGS_open_files = 0;
93 // Bloom filter bits per key.
94 // Negative means use default settings.
95 static int FLAGS_bloom_bits = -1;
97 // If true, do not destroy the existing database. If you set this
98 // flag and also specify a benchmark that wants a fresh database, that
99 // benchmark will fail.
100 static bool FLAGS_use_existing_db = false;
102 // Use the db with the following name.
// (NULL here; main() fills in a default under the test directory when
// no --db=<path> flag is given.)
103 static const char* FLAGS_db = NULL;
109 // Helper for quickly generating random data.
110 class RandomGenerator {
117 // We use a limited amount of data over and over again and ensure
118 // that it is larger than the compression window (32KB), and also
119 // large enough to serve all typical value sizes we want to write.
// Build up a ~1MB buffer out of short compressible fragments.
122 while (data_.size() < 1048576) {
123 // Add a short fragment that is as compressible as specified
124 // by FLAGS_compression_ratio.
125 test::CompressibleString(&rnd, FLAGS_compression_ratio, 100, &piece);
// Returns |len| bytes drawn from the pre-built data buffer as a Slice
// (no copy; the slice points into the buffer).
131 Slice Generate(size_t len) {
// NOTE(review): the wrap-around handling when the cursor would run past
// the end of the buffer is in lines not shown in this excerpt.
132 if (pos_ + len > data_.size()) {
134 assert(len < data_.size());
137 return Slice(data_.data() + pos_ - len, len);
// Returns the sub-slice of |s| with leading and trailing whitespace
// removed (no copy; points into the original data).
141 static Slice TrimSpace(Slice s) {
// NOTE(review): isspace() with a plain char argument is undefined
// behavior for negative values; consider casting to unsigned char.
143 while (start < s.size() && isspace(s[start])) {
146 size_t limit = s.size();
147 while (limit > start && isspace(s[limit-1])) {
150 return Slice(s.data() + start, limit - start);
// Appends |msg| to |*str|; no-op when |msg| is empty.  (The space
// separator handling is in lines not shown in this excerpt.)
153 static void AppendWithSpace(std::string* str, Slice msg) {
154 if (msg.empty()) return;
158 str->append(msg.data(), msg.size());
// Per-thread timing/throughput accumulator (excerpt of class Stats).
169 double last_op_finish_;
171 std::string message_;
178 last_op_finish_ = start_;
183 start_ = Env::Default()->NowMicros();
// Folds another thread's stats into this one so a single combined
// report can be printed: sums counters, widens the [start, finish]
// window, merges histograms.
188 void Merge(const Stats& other) {
189 hist_.Merge(other.hist_);
190 done_ += other.done_;
191 bytes_ += other.bytes_;
192 seconds_ += other.seconds_;
193 if (other.start_ < start_) start_ = other.start_;
194 if (other.finish_ > finish_) finish_ = other.finish_;
196 // Just keep the messages from one thread
197 if (message_.empty()) message_ = other.message_;
201 finish_ = Env::Default()->NowMicros();
202 seconds_ = (finish_ - start_) * 1e-6;
205 void AddMessage(Slice msg) {
206 AppendWithSpace(&message_, msg);
// Called once per completed operation: records per-op latency in the
// histogram (when enabled) and prints periodic progress to stderr.
209 void FinishedSingleOp() {
210 if (FLAGS_histogram) {
211 double now = Env::Default()->NowMicros();
212 double micros = now - last_op_finish_;
// Flag unusually slow operations (> 20ms) as they happen.
214 if (micros > 20000) {
215 fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
218 last_op_finish_ = now;
// Progress reporting with geometrically growing intervals so output
// stays readable for both small and large op counts.
222 if (done_ >= next_report_) {
223 if (next_report_ < 1000) next_report_ += 100;
224 else if (next_report_ < 5000) next_report_ += 500;
225 else if (next_report_ < 10000) next_report_ += 1000;
226 else if (next_report_ < 50000) next_report_ += 5000;
227 else if (next_report_ < 100000) next_report_ += 10000;
228 else if (next_report_ < 500000) next_report_ += 50000;
229 else next_report_ += 100000;
230 fprintf(stderr, "... finished %d ops%30s\r", done_, "");
235 void AddBytes(int64_t n) {
// Prints the final one-line result (micros/op, optional MB/s and
// message) plus the latency histogram when --histogram=1.
239 void Report(const Slice& name) {
240 // Pretend at least one op was done in case we are running a benchmark
241 // that does not call FinishedSingleOp().
242 if (done_ < 1) done_ = 1;
246 // Rate is computed on actual elapsed time, not the sum of per-thread
248 double elapsed = (finish_ - start_) * 1e-6;
250 snprintf(rate, sizeof(rate), "%6.1f MB/s",
251 (bytes_ / 1048576.0) / elapsed);
254 AppendWithSpace(&extra, message_);
256 fprintf(stdout, "%-12s : %11.3f micros/op;%s%s\n",
257 name.ToString().c_str(),
258 seconds_ * 1e6 / done_,
259 (extra.empty() ? "" : " "),
261 if (FLAGS_histogram) {
262 fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
268 // State shared by all concurrent executions of the same benchmark.
274 // Each thread goes through the following states:
276 // (2) waiting for others to be initialized
// cv is tied to mu: threads use it to rendezvous at start and to signal
// completion back to the coordinator.
284 SharedState() : cv(&mu) { }
287 // Per-thread state for concurrent executions of the same benchmark.
289 int tid; // 0..n-1 when running in n threads
290 Random rand; // Has different seeds for different threads
294 ThreadState(int index)
// Benchmark members (excerpt) followed by PrintHeader(), which prints
// the run configuration and estimated database sizes to stdout.
305 const FilterPolicy* filter_policy_;
309 int entries_per_batch_;
310 WriteOptions write_options_;
315 const int kKeySize = 16;
317 fprintf(stdout, "Keys: %d bytes each\n", kKeySize);
318 fprintf(stdout, "Values: %d bytes each (%d bytes after compression)\n",
320 static_cast<int>(FLAGS_value_size * FLAGS_compression_ratio + 0.5));
321 fprintf(stdout, "Entries: %d\n", num_);
// RawSize assumes uncompressed key+value pairs; FileSize additionally
// assumes values compress by FLAGS_compression_ratio.
322 fprintf(stdout, "RawSize: %.1f MB (estimated)\n",
323 ((static_cast<int64_t>(kKeySize + FLAGS_value_size) * num_)
325 fprintf(stdout, "FileSize: %.1f MB (estimated)\n",
326 (((kKeySize + FLAGS_value_size * FLAGS_compression_ratio) * num_)
329 fprintf(stdout, "------------------------------------------------\n");
// Warns about build configurations that make benchmark numbers
// misleading: unoptimized builds, enabled assertions, and missing or
// ineffective snappy compression.
332 void PrintWarnings() {
333 #if defined(__GNUC__) && !defined(__OPTIMIZE__)
335 "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"
340 "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
343 // See if snappy is working by attempting to compress a compressible string
344 const char text[] = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy";
345 std::string compressed;
346 if (!port::Snappy_Compress(text, sizeof(text), &compressed)) {
347 fprintf(stdout, "WARNING: Snappy compression is not enabled\n");
// A run of identical bytes should compress well; if it did not shrink,
// the snappy stub is a no-op.
348 } else if (compressed.size() >= sizeof(text)) {
349 fprintf(stdout, "WARNING: Snappy compression is not effective\n");
// Prints the LevelDB version, current date, and (on Linux) CPU model
// and cache size parsed from /proc/cpuinfo, all to stderr.
353 void PrintEnvironment() {
354 fprintf(stderr, "LevelDB: version %d.%d\n",
355 kMajorVersion, kMinorVersion);
358 time_t now = time(NULL);
359 fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
// /proc/cpuinfo only exists on Linux; silently skip CPU info elsewhere.
361 FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
362 if (cpuinfo != NULL) {
365 std::string cpu_type;
366 std::string cache_size;
367 while (fgets(line, sizeof(line), cpuinfo) != NULL) {
368 const char* sep = strchr(line, ':');
// Split "key : value" lines; the "- 1" drops the space before ':'.
372 Slice key = TrimSpace(Slice(line, sep - 1 - line));
373 Slice val = TrimSpace(Slice(sep + 1));
374 if (key == "model name") {
376 cpu_type = val.ToString();
377 } else if (key == "cache size") {
378 cache_size = val.ToString();
382 fprintf(stderr, "CPU: %d * %s\n", num_cpus, cpu_type.c_str());
383 fprintf(stderr, "CPUCache: %s\n", cache_size.c_str());
// Benchmark constructor (excerpt): builds cache/filter policy from the
// flags, then cleans up stale heap profiles and (unless
// --use_existing_db=1) destroys any existing database at FLAGS_db.
390 : cache_(FLAGS_cache_size >= 0 ? NewLRUCache(FLAGS_cache_size) : NULL),
391 filter_policy_(FLAGS_bloom_bits >= 0
392 ? NewBloomFilterPolicy(FLAGS_bloom_bits)
396 value_size_(FLAGS_value_size),
397 entries_per_batch_(1),
398 reads_(FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads),
// Remove heap profiles left over from previous runs ("heap-" prefix
// matches the files written by HeapProfile()).
400 std::vector<std::string> files;
401 Env::Default()->GetChildren(FLAGS_db, &files);
402 for (size_t i = 0; i < files.size(); i++) {
403 if (Slice(files[i]).starts_with("heap-")) {
404 Env::Default()->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
407 if (!FLAGS_use_existing_db) {
408 DestroyDB(FLAGS_db, Options());
// Destructor owns cache_ and filter_policy_ (excerpt).
415 delete filter_policy_;
// Run() (excerpt): walks the comma-separated FLAGS_benchmarks list and
// dispatches each name to the matching Benchmark method, resetting
// per-benchmark parameters between runs.
422 const char* benchmarks = FLAGS_benchmarks;
423 while (benchmarks != NULL) {
// Split off the next benchmark name at the next comma.
424 const char* sep = strchr(benchmarks, ',');
430 name = Slice(benchmarks, sep - benchmarks);
431 benchmarks = sep + 1;
434 // Reset parameters that may be overridden below
436 reads_ = (FLAGS_reads < 0 ? FLAGS_num : FLAGS_reads);
437 value_size_ = FLAGS_value_size;
438 entries_per_batch_ = 1;
439 write_options_ = WriteOptions();
441 void (Benchmark::*method)(ThreadState*) = NULL;
442 bool fresh_db = false;
443 int num_threads = FLAGS_threads;
// Map the benchmark name to a member-function pointer, and set any
// benchmark-specific parameter overrides.
445 if (name == Slice("fillseq")) {
447 method = &Benchmark::WriteSeq;
448 } else if (name == Slice("fillbatch")) {
450 entries_per_batch_ = 1000;
451 method = &Benchmark::WriteSeq;
452 } else if (name == Slice("fillrandom")) {
454 method = &Benchmark::WriteRandom;
455 } else if (name == Slice("overwrite")) {
457 method = &Benchmark::WriteRandom;
458 } else if (name == Slice("fillsync")) {
461 write_options_.sync = true;
462 method = &Benchmark::WriteRandom;
463 } else if (name == Slice("fill100K")) {
466 value_size_ = 100 * 1000;
467 method = &Benchmark::WriteRandom;
468 } else if (name == Slice("readseq")) {
469 method = &Benchmark::ReadSequential;
470 } else if (name == Slice("readreverse")) {
471 method = &Benchmark::ReadReverse;
472 } else if (name == Slice("readrandom")) {
473 method = &Benchmark::ReadRandom;
474 } else if (name == Slice("readmissing")) {
475 method = &Benchmark::ReadMissing;
476 } else if (name == Slice("seekrandom")) {
477 method = &Benchmark::SeekRandom;
478 } else if (name == Slice("readhot")) {
479 method = &Benchmark::ReadHot;
480 } else if (name == Slice("readrandomsmall")) {
482 method = &Benchmark::ReadRandom;
483 } else if (name == Slice("deleteseq")) {
484 method = &Benchmark::DeleteSeq;
485 } else if (name == Slice("deleterandom")) {
486 method = &Benchmark::DeleteRandom;
487 } else if (name == Slice("readwhilewriting")) {
488 num_threads++; // Add extra thread for writing
489 method = &Benchmark::ReadWhileWriting;
490 } else if (name == Slice("compact")) {
491 method = &Benchmark::Compact;
492 } else if (name == Slice("crc32c")) {
493 method = &Benchmark::Crc32c;
494 } else if (name == Slice("acquireload")) {
495 method = &Benchmark::AcquireLoad;
496 } else if (name == Slice("snappycomp")) {
497 method = &Benchmark::SnappyCompress;
498 } else if (name == Slice("snappyuncomp")) {
499 method = &Benchmark::SnappyUncompress;
500 } else if (name == Slice("heapprofile")) {
// stats/sstables run immediately (no method pointer needed).
502 } else if (name == Slice("stats")) {
503 PrintStats("leveldb.stats");
504 } else if (name == Slice("sstables")) {
505 PrintStats("leveldb.sstables");
507 if (name != Slice()) { // No error message for empty name
508 fprintf(stderr, "unknown benchmark '%s'\n", name.ToString().c_str());
// A benchmark that needs a fresh db conflicts with --use_existing_db;
// skip it rather than clobbering the user's data.
513 if (FLAGS_use_existing_db) {
514 fprintf(stdout, "%-12s : skipped (--use_existing_db is true)\n",
515 name.ToString().c_str());
520 DestroyDB(FLAGS_db, Options());
525 if (method != NULL) {
526 RunBenchmark(num_threads, name, method);
// Per-thread entry point: announces initialization, waits for all
// threads to be ready, runs the benchmark method, then announces
// completion.  Excerpt: ThreadArg declaration and cv.Wait() calls are
// in lines not shown here.
536 void (Benchmark::*method)(ThreadState*);
539 static void ThreadBody(void* v) {
540 ThreadArg* arg = reinterpret_cast<ThreadArg*>(v);
541 SharedState* shared = arg->shared;
542 ThreadState* thread = arg->thread;
// Rendezvous: last thread to initialize wakes the coordinator, then
// everyone blocks until shared->start flips.
544 MutexLock l(&shared->mu);
545 shared->num_initialized++;
546 if (shared->num_initialized >= shared->total) {
547 shared->cv.SignalAll();
549 while (!shared->start) {
554 thread->stats.Start();
555 (arg->bm->*(arg->method))(thread);
556 thread->stats.Stop();
// Completion: last thread done wakes the coordinator.
559 MutexLock l(&shared->mu);
561 if (shared->num_done >= shared->total) {
562 shared->cv.SignalAll();
// Spawns |n| threads running |method|, waits for them all to finish,
// merges their per-thread stats into thread 0, and prints one report.
567 void RunBenchmark(int n, Slice name,
568 void (Benchmark::*method)(ThreadState*)) {
571 shared.num_initialized = 0;
573 shared.start = false;
575 ThreadArg* arg = new ThreadArg[n];
576 for (int i = 0; i < n; i++) {
578 arg[i].method = method;
579 arg[i].shared = &shared;
580 arg[i].thread = new ThreadState(i);
581 arg[i].thread->shared = &shared;
582 Env::Default()->StartThread(ThreadBody, &arg[i]);
// Wait for all threads to initialize, release them simultaneously so
// the timed region starts together, then wait for completion.
586 while (shared.num_initialized < n) {
591 shared.cv.SignalAll();
592 while (shared.num_done < n) {
// Fold everything into thread 0's stats for a single combined report.
597 for (int i = 1; i < n; i++) {
598 arg[0].thread->stats.Merge(arg[i].thread->stats);
600 arg[0].thread->stats.Report(name);
602 for (int i = 0; i < n; i++) {
603 delete arg[i].thread;
// Micro-benchmark: repeatedly checksums a 4KB buffer until ~500MB of
// data has been processed.
608 void Crc32c(ThreadState* thread) {
609 // Checksum about 500MB of data total
610 const int size = 4096;
611 const char* label = "(4K per op)";
612 std::string data(size, 'x');
615 while (bytes < 500 * 1048576) {
616 crc = crc32c::Value(data.data(), size);
617 thread->stats.FinishedSingleOp();
620 // Print so result is not dead
621 fprintf(stderr, "... crc=0x%x\r", static_cast<unsigned int>(crc));
623 thread->stats.AddBytes(bytes);
624 thread->stats.AddMessage(label);
// Micro-benchmark: measures the cost of AtomicPointer acquire-loads,
// batching 1000 loads per reported op.
627 void AcquireLoad(ThreadState* thread) {
629 port::AtomicPointer ap(&dummy);
632 thread->stats.AddMessage("(each op is 1000 loads)");
633 while (count < 100000) {
634 for (int i = 0; i < 1000; i++) {
635 ptr = ap.Acquire_Load();
638 thread->stats.FinishedSingleOp();
640 if (ptr == NULL) exit(1); // Disable unused variable warning.
// Micro-benchmark: repeatedly snappy-compresses one block-sized input
// until 1GB of input has been processed; reports the compression ratio.
643 void SnappyCompress(ThreadState* thread) {
645 Slice input = gen.Generate(Options().block_size);
647 int64_t produced = 0;
649 std::string compressed;
650 while (ok && bytes < 1024 * 1048576) { // Compress 1G
651 ok = port::Snappy_Compress(input.data(), input.size(), &compressed);
652 produced += compressed.size();
653 bytes += input.size();
654 thread->stats.FinishedSingleOp();
658 thread->stats.AddMessage("(snappy failure)");
661 snprintf(buf, sizeof(buf), "(output: %.1f%%)",
662 (produced * 100.0) / bytes);
663 thread->stats.AddMessage(buf);
664 thread->stats.AddBytes(bytes);
// Micro-benchmark: compresses one input once, then repeatedly
// uncompresses it until 1GB of (uncompressed) data has been produced.
668 void SnappyUncompress(ThreadState* thread) {
670 Slice input = gen.Generate(Options().block_size);
671 std::string compressed;
672 bool ok = port::Snappy_Compress(input.data(), input.size(), &compressed);
674 char* uncompressed = new char[input.size()];
675 while (ok && bytes < 1024 * 1048576) { // Uncompress 1G
676 ok = port::Snappy_Uncompress(compressed.data(), compressed.size(),
678 bytes += input.size();
679 thread->stats.FinishedSingleOp();
681 delete[] uncompressed;
684 thread->stats.AddMessage("(snappy failure)");
686 thread->stats.AddBytes(bytes);
// Open() (excerpt): builds Options from the command-line flags and
// opens the database at FLAGS_db; open failure is fatal.
693 options.create_if_missing = !FLAGS_use_existing_db;
694 options.block_cache = cache_;
695 options.write_buffer_size = FLAGS_write_buffer_size;
696 options.max_open_files = FLAGS_open_files;
697 options.filter_policy = filter_policy_;
698 Status s = DB::Open(options, FLAGS_db, &db_);
700 fprintf(stderr, "open error: %s\n", s.ToString().c_str());
// Thin wrappers: sequential vs. random key order for DoWrite().
705 void WriteSeq(ThreadState* thread) {
706 DoWrite(thread, true);
709 void WriteRandom(ThreadState* thread) {
710 DoWrite(thread, false);
// Writes num_ entries in batches of entries_per_batch_, with keys
// either sequential or uniformly random in [0, FLAGS_num).
713 void DoWrite(ThreadState* thread, bool seq) {
// Note when this run uses a non-default op count so the report says so.
714 if (num_ != FLAGS_num) {
716 snprintf(msg, sizeof(msg), "(%d ops)", num_);
717 thread->stats.AddMessage(msg);
724 for (int i = 0; i < num_; i += entries_per_batch_) {
726 for (int j = 0; j < entries_per_batch_; j++) {
727 const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
// Keys are fixed-width zero-padded decimals so lexicographic order
// matches numeric order.
729 snprintf(key, sizeof(key), "%016d", k);
730 batch.Put(key, gen.Generate(value_size_));
731 bytes += value_size_ + strlen(key);
732 thread->stats.FinishedSingleOp();
734 s = db_->Write(write_options_, &batch);
736 fprintf(stderr, "put error: %s\n", s.ToString().c_str());
740 thread->stats.AddBytes(bytes);
// Iterates forward from the first key, counting key+value bytes, for up
// to reads_ entries.
743 void ReadSequential(ThreadState* thread) {
744 Iterator* iter = db_->NewIterator(ReadOptions());
747 for (iter->SeekToFirst(); i < reads_ && iter->Valid(); iter->Next()) {
748 bytes += iter->key().size() + iter->value().size();
749 thread->stats.FinishedSingleOp();
753 thread->stats.AddBytes(bytes);
// Same as ReadSequential but iterating backward from the last key.
756 void ReadReverse(ThreadState* thread) {
757 Iterator* iter = db_->NewIterator(ReadOptions());
760 for (iter->SeekToLast(); i < reads_ && iter->Valid(); iter->Prev()) {
761 bytes += iter->key().size() + iter->value().size();
762 thread->stats.FinishedSingleOp();
766 thread->stats.AddBytes(bytes);
// Point-reads reads_ uniformly random keys; reports how many were found.
769 void ReadRandom(ThreadState* thread) {
773 for (int i = 0; i < reads_; i++) {
775 const int k = thread->rand.Next() % FLAGS_num;
776 snprintf(key, sizeof(key), "%016d", k);
777 if (db_->Get(options, key, &value).ok()) {
780 thread->stats.FinishedSingleOp();
783 snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
784 thread->stats.AddMessage(msg);
// Reads keys guaranteed to be absent: the trailing "." makes the key
// differ from every key written by DoWrite().
787 void ReadMissing(ThreadState* thread) {
790 for (int i = 0; i < reads_; i++) {
792 const int k = thread->rand.Next() % FLAGS_num;
793 snprintf(key, sizeof(key), "%016d.", k);
794 db_->Get(options, key, &value);
795 thread->stats.FinishedSingleOp();
// Reads random keys restricted to a hot 1% of the key space.
799 void ReadHot(ThreadState* thread) {
802 const int range = (FLAGS_num + 99) / 100;
803 for (int i = 0; i < reads_; i++) {
805 const int k = thread->rand.Next() % range;
806 snprintf(key, sizeof(key), "%016d", k);
807 db_->Get(options, key, &value);
808 thread->stats.FinishedSingleOp();
// Performs reads_ random Seek()s with a fresh iterator per seek;
// reports how many seeks landed exactly on the requested key.
812 void SeekRandom(ThreadState* thread) {
815 for (int i = 0; i < reads_; i++) {
816 Iterator* iter = db_->NewIterator(options);
818 const int k = thread->rand.Next() % FLAGS_num;
819 snprintf(key, sizeof(key), "%016d", k);
821 if (iter->Valid() && iter->key() == key) found++;
823 thread->stats.FinishedSingleOp();
826 snprintf(msg, sizeof(msg), "(%d of %d found)", found, num_);
827 thread->stats.AddMessage(msg);
// Deletes num_ keys in batches, sequential or random order, mirroring
// DoWrite()'s key generation.
830 void DoDelete(ThreadState* thread, bool seq) {
834 for (int i = 0; i < num_; i += entries_per_batch_) {
836 for (int j = 0; j < entries_per_batch_; j++) {
837 const int k = seq ? i+j : (thread->rand.Next() % FLAGS_num);
839 snprintf(key, sizeof(key), "%016d", k);
841 thread->stats.FinishedSingleOp();
843 s = db_->Write(write_options_, &batch);
845 fprintf(stderr, "del error: %s\n", s.ToString().c_str());
// Thin wrappers: sequential vs. random key order for DoDelete().
851 void DeleteSeq(ThreadState* thread) {
852 DoDelete(thread, true);
855 void DeleteRandom(ThreadState* thread) {
856 DoDelete(thread, false);
// Mixed workload: threads with tid > 0 do random reads; the tid == 0
// thread writes random keys continuously until the readers finish.
859 void ReadWhileWriting(ThreadState* thread) {
860 if (thread->tid > 0) {
863 // Special thread that keeps writing until other threads are done.
// "+ 1" excludes this writer thread itself from the done count.
867 MutexLock l(&thread->shared->mu);
868 if (thread->shared->num_done + 1 >= thread->shared->num_initialized) {
869 // Other threads have finished
874 const int k = thread->rand.Next() % FLAGS_num;
876 snprintf(key, sizeof(key), "%016d", k);
877 Status s = db_->Put(write_options_, key, gen.Generate(value_size_));
879 fprintf(stderr, "put error: %s\n", s.ToString().c_str());
884 // Do not count any of the preceding work/delay in stats.
885 thread->stats.Start();
// Compacts the entire key range (NULL..NULL = whole database).
889 void Compact(ThreadState* thread) {
890 db_->CompactRange(NULL, NULL);
// Prints the DB property named by |key| (e.g. "leveldb.stats").
893 void PrintStats(const char* key) {
895 if (!db_->GetProperty(key, &stats)) {
898 fprintf(stdout, "\n%s\n", stats.c_str());
// Callback for GetHeapProfile: appends profile bytes to the open file.
901 static void WriteToFile(void* arg, const char* buf, int n) {
902 reinterpret_cast<WritableFile*>(arg)->Append(Slice(buf, n));
// HeapProfile() (excerpt): dumps a heap profile to a numbered
// "heap-NNNN" file under FLAGS_db, deleting it if profiling is
// unsupported by this port.
907 snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
909 Status s = Env::Default()->NewWritableFile(fname, &file);
911 fprintf(stderr, "%s\n", s.ToString().c_str());
914 bool ok = port::GetHeapProfile(WriteToFile, file);
917 fprintf(stderr, "heap profiling not supported\n");
918 Env::Default()->DeleteFile(fname);
923 } // namespace leveldb
// Entry point: seeds flag defaults from leveldb::Options, parses
// --flag=value arguments, picks a default db path if none was given,
// and runs the benchmark suite.
925 int main(int argc, char** argv) {
926 FLAGS_write_buffer_size = leveldb::Options().write_buffer_size;
927 FLAGS_open_files = leveldb::Options().max_open_files;
928 std::string default_db_path;
930 for (int i = 1; i < argc; i++) {
// The "%c" + junk pattern rejects trailing garbage: sscanf returns 1
// only when the numeric value consumed the whole argument.
934 if (leveldb::Slice(argv[i]).starts_with("--benchmarks=")) {
935 FLAGS_benchmarks = argv[i] + strlen("--benchmarks=");
936 } else if (sscanf(argv[i], "--compression_ratio=%lf%c", &d, &junk) == 1) {
937 FLAGS_compression_ratio = d;
938 } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 &&
939 (n == 0 || n == 1)) {
941 } else if (sscanf(argv[i], "--use_existing_db=%d%c", &n, &junk) == 1 &&
942 (n == 0 || n == 1)) {
943 FLAGS_use_existing_db = n;
944 } else if (sscanf(argv[i], "--num=%d%c", &n, &junk) == 1) {
946 } else if (sscanf(argv[i], "--reads=%d%c", &n, &junk) == 1) {
948 } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
950 } else if (sscanf(argv[i], "--value_size=%d%c", &n, &junk) == 1) {
951 FLAGS_value_size = n;
952 } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
953 FLAGS_write_buffer_size = n;
954 } else if (sscanf(argv[i], "--cache_size=%d%c", &n, &junk) == 1) {
955 FLAGS_cache_size = n;
956 } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
957 FLAGS_bloom_bits = n;
958 } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
959 FLAGS_open_files = n;
960 } else if (strncmp(argv[i], "--db=", 5) == 0) {
961 FLAGS_db = argv[i] + 5;
963 fprintf(stderr, "Invalid flag '%s'\n", argv[i]);
968 // Choose a location for the test database if none given with --db=<path>
969 if (FLAGS_db == NULL) {
970 leveldb::Env::Default()->GetTestDirectory(&default_db_path);
971 default_db_path += "/dbbench";
// default_db_path must outlive FLAGS_db, which points into it; it is a
// function-scope local declared above for exactly that reason.
972 FLAGS_db = default_db_path.c_str();
975 leveldb::Benchmark benchmark;