
minor changes to use sorted(), and make hashing large files work.

John-Mark Gurney · 2 years ago · branch main · commit d4d2d2e367
1 changed file with 12 additions and 9 deletions:

ui/medashare/cli.py: +12 -9


@@ -29,9 +29,7 @@ _validhashes = set([ 'sha256', 'sha512' ])
 _hashlengths = { len(getattr(hashlib, x)().hexdigest()): x for x in _validhashes }
 
 def _iterdictlist(obj):
-	itms = list(obj.items())
-	itms.sort()
-	for k, v in itms:
+	for k, v in sorted(obj.items()):
 		if isinstance(v, list):
 			v = v[:]
 			v.sort()
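
The first hunk collapses a build-a-list-then-sort pattern into a single sorted() call; sorted() already returns a new list, so the temporary name and the in-place sort are redundant. A minimal sketch of the equivalence (the dict d is invented for illustration):

# sorted(d.items()) returns a new list of (key, value) pairs in key
# order, matching what the removed list()/sort() pair produced.
d = {'b': [2, 1], 'a': [3]}

itms = list(d.items())
itms.sort()

assert itms == sorted(d.items())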
@@ -200,10 +198,7 @@ class CanonicalCoder(pasn1.ASN1DictCoder):
 	def enc_dict(self, obj, **kwargs):
 		class FakeIter:
 			def items(self):
-				itms = list(obj.items())
-				itms.sort()
-
-				return iter(itms)
+				return iter(sorted(obj.items()))
 
 		return pasn1.ASN1DictCoder.enc_dict(self, FakeIter(), **kwargs)
 
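The second hunk applies the same idiom inside CanonicalCoder. FakeIter exposes an items() that yields the pairs pre-sorted, so the underlying ASN.1 dict encoder emits keys in a fixed order and the same mapping always encodes to the same bytes, which is the point of a canonical coder. A sketch of the wrapper idiom, with SortedItems as a hypothetical stand-in for FakeIter:

# Hypothetical stand-in for FakeIter: any consumer calling .items()
# sees the pairs in deterministic, sorted order.
class SortedItems:
    def __init__(self, obj):
        self._obj = obj

    def items(self):
        return iter(sorted(self._obj.items()))

assert list(SortedItems({'b': 1, 'a': 2}).items()) == [('a', 2), ('b', 1)]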

@@ -478,11 +473,19 @@ class ObjectStore(object):
 		else:
 			raise KeyError('unable to find metadata for file')
 
+def _readfp(fp):
+	while True:
+		r = fp.read(64*1024)
+		if r == b'':
+			return
+
+		yield r
+
 def _hashfile(fname):
 	hash = getattr(hashlib, _defaulthash)()
 	with open(fname, 'rb') as fp:
-		r = fp.read()
-		hash.update(r)
+		for r in _readfp(fp):
+			hash.update(r)
 
 	return '%s:%s' % (_defaulthash, hash.hexdigest())
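
The third hunk is the "make hashing large files work" part: the old fp.read() slurped the whole file into memory at once, while the new _readfp generator yields 64 KiB chunks that _hashfile feeds to the hash incrementally, so memory use stays bounded by the buffer size. A self-contained sketch of the same technique (hash_chunked is a hypothetical name; the buffer size matches the diff's 64*1024):

import hashlib

def hash_chunked(fname, algo='sha512', bufsize=64 * 1024):
    # Hypothetical standalone version of _hashfile: hash the file in
    # fixed-size chunks so memory use is independent of file size.
    h = hashlib.new(algo)
    with open(fname, 'rb') as fp:
        while True:
            chunk = fp.read(bufsize)
            if not chunk:
                break
            h.update(chunk)

    return '%s:%s' % (algo, h.hexdigest())

On Python 3.11 and newer, hashlib.file_digest(fp, algo) performs the same chunked read in the standard library.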


