diff options
author | Lars Wirzenius <liw@liw.fi> | 2010-12-02 15:16:19 +0000 |
---|---|---|
committer | Lars Wirzenius <liw@liw.fi> | 2010-12-02 15:16:19 +0000 |
commit | 7822f8cf0816d3c21c9c7048f6805ab956d38be1 (patch) | |
tree | cdb6751449f53954645ad0fd45246a6d1c0c3ecf | |
parent | 5a9bc498377f1d68b31ee4d301d47d91fe7d3907 (diff) | |
download | genbackupdata-7822f8cf0816d3c21c9c7048f6805ab956d38be1.tar.gz |
Update binary junk generation algorithm.
-rw-r--r-- | genbackupdata.py | 27 |
1 file changed, 16 insertions, 11 deletions
diff --git a/genbackupdata.py b/genbackupdata.py index 5693f1f..2fbb416 100644 --- a/genbackupdata.py +++ b/genbackupdata.py @@ -260,29 +260,34 @@ class BackupData: """Generate SIZE bytes of more or less random binary junk""" # The following code has had some fine manual fine tuning done - # to it. This has made it ugly, but faster. On a 1.2 MHz Intel - # Pentium M, it generates around 6 MB/s. + # to it. This has made it a bit ugly, but faster. On a + # "Intel(R) Core(TM)2 Duo CPU L9400 @ 1.86GHz", it produces + # about 25 MB/s. chunks = [] - sum = hashlib.md5() + sum = hashlib.sha1() chunk_size = len(sum.digest()) - - initial_bytes = min(size, chunk_size * 8) + + initial_bytes = min(size, 128) for i in range(initial_bytes / chunk_size): sum.update(chr(random.getrandbits(8))) - chunks.append(sum.digest()) - + chunk = sum.digest() + chunks.append(chunk) + size -= len(chunks) * chunk_size for i in range(size / chunk_size): sum.update("a") - chunks.append(sum.digest()) - + chunk = sum.digest() + chunks.append(chunk) + if size % chunk_size > 0: sum.update(chr(random.getrandbits(8))) - chunks.append(sum.digest()[:size % chunk_size]) - + chunk = sum.digest() + chunks.append(chunk[:size % chunk_size]) + return "".join(chunks) + def generate_binary_data(self, size): """Generate SIZE bytes of binary junk. |