blob: 9c82b93b794366207ed43578eddcbc358e1ffef0 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
|
#!/usr/bin/python
#
# Exercise my btree implementation, for simple benchmarking purposes.
# The benchmark gets a location and an operation count as command line
# arguments.
#
# If the location is the empty string, an in-memory node store is used.
# Otherwise it must be a non-existent directory name.
#
# The benchmark will do the given number of insertions into the tree, and
# measure the speed of that. Then it will look up each of those.
import os
import random
import shutil
import sys
import time
import btree
import nodestore_disk
import nodestore_memory
def measure(keys, func):
    """Return the wall-clock seconds taken to call func(key) for every key.

    The caller subtracts a separately measured empty-loop time to cancel
    out loop overhead, so this deliberately measures the raw loop.

    Note: this uses time.time() rather than time.clock() because
    time.clock() measured CPU time on Unix (wrong for an I/O-bound disk
    store benchmark) and was removed in Python 3.8; wall-clock time is
    what we want for throughput numbers.
    """
    start = time.time()
    for key in keys:
        func(key)
    end = time.time()
    return end - start
def main():
location = sys.argv[1]
n = int(sys.argv[2])
node_size = 4096
if location == '':
ns = nodestore_memory.NodeStoreMemory(4096)
else:
if os.path.exists(location):
raise Exception('%s exists already' % location)
os.mkdir(location)
ns = nodestore_disk.NodeStoreDisk(location, 4096)
tree = btree.BTree(ns, 8)
# Create list of keys.
keys = ['%08d' % i for i in xrange(n)]
# Calibrate.
looptime = measure(keys, lambda key: None)
# Measure inserts.
random.shuffle(keys)
value = 'x' * 128
insert_time = measure(keys, lambda key: tree.insert(key, value)) - looptime
# Measure lookups.
random.shuffle(keys)
lookup_time = measure(keys, lambda key: tree.lookup(key)) - looptime
# Report
print 'num_operations: %d' % n
print 'insert: %.3f s (%.1f/s)' % (insert_time, n/insert_time)
print 'lookup-time: %.3f s (%.1f/s)' % (lookup_time, n/lookup_time)
# Clean up
if location:
shutil.rmtree(location)
if __name__ == '__main__':
main()
|