Compare commits

...

3 Commits

13
main.py
View File

@ -33,6 +33,8 @@ import os
import gzip
import secrets
MAX_CACHE_AGE = 60*60 # 1h
class IotaFS_BlobStore():
def __init__(self, api=None):
if api==None:
@ -139,7 +141,6 @@ class IotaFS_BlobStore():
x = f.read()
sHash = self.uploadData(x,secret)
print(sHash.hex())
#sHash = getSHash(x, "catSecret".encode())
y = self.getData(sHash)
with open("res.jpeg","wb") as f:
f.write(y)
@ -255,8 +256,7 @@ class IotaFS():
if sHash==b'':
return [b'', 0, time.time()]
print("/GET/ "+str(sHash)+" <- ")
if sHash in self.cache:
# TODO: maybe update if too old?
if sHash in self.cache and time.time()-self.cache[sHash][2] > MAX_CACHE_AGE:
self.cache[sHash][2] = time.time()
return self.cache[sHash]
else:
@ -369,14 +369,11 @@ class IotaFS_Fuse(LoggingMixIn, Operations):
st['st_mtime'] = lastFetch
st['st_ctime'] = 0
else:
st['st_size'] = 1024*1024*1024 # 1 Byte lol
st['st_size'] = len(sHash) + len(path) + 8 # Just an approximation...
st['st_atime'] = 0 #last access time in seconds
st['st_mtime'] = 0 #last modified time in seconds
st['st_ctime'] = 0 # very old file
# TODO: Actual real value
# I think, when I put it like this, it executes copies as one write and not multiple (which would be stupid with my implementation)
block_size = 1024*1024*1024
st['st_blocks'] = (int) ((st['st_size'] + block_size-1) / block_size)
st['st_blocks'] = 1 # Until we can skip blocks when reading / writing this should provide best possible performance...
return st
def mkdir(self, path, mode):