# TODO: Implement File COW (except for append) (+ version / token updates caused by this)
# TODO: ? Stop using tokens for dirs, use hashed name + parent token
# TODO: ? Stop using tokens for files, use hashed name + short ?version-number + parent token instead
# TODO: Store version-numbers for files in parent-dir
# TODO: Change Milestone-Format from [stone,stone,...] to [[dirStone,dirStone,...],[fileStone,...]] and don't save type for the stones
# TODO: Decide how / when / from which class to push new blocks
# TODO: Unload 'overwritten' blobs
# TODO: Close blobs when they become unknown to the kernel or when we unmount (genesis only on unmount)
# TODO: inode_id -> actual Inode lookup table

from iota import Iota, ProposedTransaction, Address, TryteString, Tag
from iota.crypto.addresses import AddressGenerator
from iota.crypto.types import Seed
from iota.codecs import TrytesDecodeError

from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad

import math
from pprint import pprint
import hashlib
import sys
import random
import time

import msgpack
import copy

import gzip
import secrets

import stat
import errno
import pyfuse3
import trio
from collections import defaultdict
from pyfuse3 import FUSEError

try:
    import faulthandler
except ImportError:
    pass
else:
    faulthandler.enable()

# Maximum number of payload bytes buffered in one BlobChunk before it is sealed.
CHUNKSIZE = 2187
# When True, every append() immediately pushes freshly sealed chunks to the Tangle.
SYNCWRITES = True

def log(txt):
    print("[-] " + str(txt))

def sendEmOff(bundles, api):
    for bundle in bundles:
        print("[->]")
        api.send_trytes(
            trytes=bundle
        )

class Atom():
    # One change record in a directory blob: either a delta for a single named
    # inode, or a milestone snapshot covering all inodes of the directory.
    def __init__(self, milestone: bool, cont, name: str = None) -> None:
        self.milestone = milestone
        self.delta = not milestone
        self.name = name
        self.cont = cont

    def dump(self):
        if self.milestone:
            return msgpack.dumps([True, self.cont])
        else:
            return msgpack.dumps([False, self.name, self.cont])
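
# Illustrative sketch of the two encodings Atom.dump() produces (guarded so it
# never runs; the values are made up, the delta keys mirror Inode.change() below).
if False:
    delta = Atom(False, {b't': True, b's': 42, b'h': b'NULL'}, "file.txt")
    print(delta.dump())  # msgpack of [False, "file.txt", {b't': True, b's': 42, b'h': b'NULL'}]
    stone = Atom(True, {"file.txt": [42, b'NULL']})
    print(stone.dump())  # msgpack of [True, {"file.txt": [42, b'NULL']}]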

class BlobChunk():
    def __init__(self, data: bytes = b'', sealed: bool = False) -> None:
        self.data = data
        self.sealed = sealed

    def getData(self) -> bytes:
        return self.data

    def append(self, data: bytes) -> None:
        if len(data) + len(self.data) > CHUNKSIZE:
            raise Exception("That is too big!")
        self.data += data
        if len(self.data) == CHUNKSIZE:
            self.seal()

    def getBytesLeft(self) -> int:
        if self.sealed:
            return 0
        return CHUNKSIZE - len(self.data)

    def seal(self) -> None:
        self.sealed = True

    def isSealed(self) -> bool:
        return self.sealed
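
# Illustrative sketch (guarded so it never runs) of how a chunk fills up and
# seals itself once it holds exactly CHUNKSIZE bytes.
if False:
    chunk = BlobChunk()
    chunk.append(b'x' * 100)
    print(chunk.getBytesLeft())  # CHUNKSIZE - 100
    chunk.append(b'y' * chunk.getBytesLeft())
    print(chunk.isSealed())      # True: sealed automatically when full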

class TangleBlob():
    # An append-only, AES-encrypted byte stream stored as a chain of chunks,
    # one chunk per Tangle address derived from the blob's token.
    def __init__(self, token: bytes, iotaApi: Iota) -> None:
        self.token = token
        self.iotaApi = iotaApi
        self.preChunks = 0
        self.chunks = []
        m = hashlib.sha3_512()
        m.update(self.token)
        trSeed = TryteString.from_bytes(m.digest())[:81]
        self.adressGen = AddressGenerator(Seed(trSeed))
        self.fetched = False
        self.pushedNum = 0

    def _requireFetched(self):
        if not self.fetched:
            self.fetch()

    def _getKey(self, chunkNum: int) -> bytes:
        # Per-chunk key material: the first 16 bytes serve as the CBC IV and the
        # next 16 bytes as the AES-128 key (see _dumpChunk and fetch).
        m = hashlib.sha3_384()
        m.update(self.token)
        m.update(chunkNum.to_bytes(8, "little"))  # 64 bits should be enough...
        m.update(self.token)
        return m.digest()

    def _genBundle(self, data, addr) -> str:
        txMsg = TryteString.from_bytes(data)
        trans = ProposedTransaction(
            address = addr,
            value = 0,
            tag = Tag("IOTAFS"),
            message = txMsg
        )
        return self.iotaApi.prepare_transfer(
            transfers = [trans],
            inputs = [addr]
        )['trytes']

    def _dumpChunk(self, chunkNum: int) -> str:
        key = self._getKey(chunkNum + self.preChunks)
        data = self.chunks[chunkNum].getData()
        cipher = AES.new(key[16:][:16], AES.MODE_CBC, key[:16])
        ct_bytes = cipher.encrypt(pad(data, AES.block_size))
        addr = self.adressGen.get_addresses(start=chunkNum + self.preChunks, count=1)[0]
        return self._genBundle(ct_bytes, addr)

    def dumpAllSealed(self):
        # Turn every sealed but not yet pushed chunk into a prepared bundle.
        bundles = []
        for i in range(max(0, len(self.chunks) - self.pushedNum)):
            c = i + self.pushedNum
            chunk = self.chunks[c]
            if chunk.isSealed():
                bundles.append(self._dumpChunk(c))
                self.pushedNum += 1
        return bundles

    def sealAndDump(self):
        # When unmounting / closing / ...
        if self.chunks:
            self.chunks[-1].seal()
        return self.dumpAllSealed()

    def append(self, data: bytes, newBlock: bool = False) -> None:
        self._requireFetched()
        if newBlock and len(self.chunks):
            self.chunks[-1].seal()
        elif len(self.chunks):
            # Fill up the last open chunk before starting new ones.
            bytesLeft = self.chunks[-1].getBytesLeft()
            if bytesLeft:
                leftChunk = data[:bytesLeft]
                data = data[bytesLeft:]
                self.chunks[-1].append(leftChunk)
        while len(data):
            chunk = data[:CHUNKSIZE]
            self.chunks.append(BlobChunk(chunk))
            data = data[CHUNKSIZE:]
        if SYNCWRITES:
            bundles = self.dumpAllSealed()
            if bundles:
                sendEmOff(bundles, self.iotaApi)

    def getChunkLen(self) -> int:
        return self.preChunks + len(self.chunks)

    def getSize(self) -> int:
        if len(self.chunks):
            return self.getChunkLen() * CHUNKSIZE - self.chunks[-1].getBytesLeft()
        return self.preChunks

    def read(self) -> bytes:
        self._requireFetched()
        data = b''
        for chunk in self.chunks:
            data += chunk.getData()
        return data

    def _dump(self) -> str:
        self.chunks[-1].seal()
        data = ""
        for c in range(len(self.chunks) - self.pushedNum):
            num = c + self.pushedNum
            data += self._dumpChunk(num)  # num is without preChunks
        self.pushedNum = self.getChunkLen()
        return data

    def fetch(self) -> None:
        # Walk the address chain until an address with no transactions is found.
        skipChunks = self.preChunks
        chunkNum = self.getChunkLen() + skipChunks
        while True:
            key = self._getKey(chunkNum)
            cipher = AES.new(key[16:][:16], AES.MODE_CBC, key[:16])
            addr = self.adressGen.get_addresses(start=chunkNum, count=1)[0]
            txHash = self.iotaApi.find_transactions(tags=[Tag("IOTAFS")], addresses=[addr])["hashes"]
            if len(txHash) == 0:
                break
            bundles = self.iotaApi.get_bundles(txHash[0])["bundles"]
            for bundle in bundles:
                for tx in bundle.transactions:
                    # TODO: Can we just strip the 9s and call it a day?
                    tryteStr = TryteString(str(tx.signature_message_fragment).rstrip("9"))
                    try:
                        ct_bytes = tryteStr.as_bytes()
                    except TrytesDecodeError:
                        ct_bytes = (tryteStr + "9").as_bytes()
                    self.chunks.append(BlobChunk(unpad(cipher.decrypt(ct_bytes), AES.block_size), True))
            chunkNum += 1
        self.pushedNum = len(self.chunks)
        self.fetched = True
        self._afterFetch()

    def _afterFetch(self) -> None:
        return

    def genToken(self) -> bytes:
        return secrets.token_bytes(32)

    def sealLastChunk(self) -> None:
        self.chunks[-1].seal()

    def chunkLayout(self, width=50):
        # ASCII overview of all chunks: '#' marks written bytes, sealed chunks are
        # padded with '=', open chunks with spaces; '{+}' marks unpushed chunks.
        fac = width / CHUNKSIZE
        lines = []
        for c, chunk in enumerate(self.chunks):
            bytesWritten = len(chunk.data)
            bytesEmpty = CHUNKSIZE - bytesWritten
            if chunk.isSealed():
                lines.append("[" + "#"*int(bytesWritten*fac) + "="*int(bytesEmpty*fac) + "] (SEALED)")
            else:
                lines.append("[" + "#"*int(bytesWritten*fac) + " "*int(bytesEmpty*fac) + "] (" + str(bytesWritten) + "/" + str(CHUNKSIZE) + ")")
            if self.pushedNum < c + 1:
                lines[-1] += " {+}"
        return "\n".join(lines)

    def _close(self):
        bundles = self.sealAndDump()
        if bundles:
            sendEmOff(bundles, self.iotaApi)

class TangleFileTreeElement(TangleBlob):
    # A directory: its blob is a stream of msgpack'ed Atoms (deltas and milestones)
    # describing its child inodes. Tokens chain deterministically, since a child's
    # token is sha3_384(parent.token + name).
    def __init__(self, name: str, lastMilestoneIndex: int, parent, iotaApi: Iota) -> None:
        if isinstance(parent, bytes):
            # The genesis element is constructed with the raw token as its parent.
            self.token = hashlib.sha3_384(parent + name.encode()).digest()
        else:
            self.token = hashlib.sha3_384(parent.token + name.encode()).digest()
        super(TangleFileTreeElement, self).__init__(self.token, iotaApi)
        self.name = name
        self.inodes = {}
        self.parent = parent
        self.milestoneIndex = lastMilestoneIndex
        self.preChunks = self.milestoneIndex

    def _afterFetch(self) -> None:
        raw = self.read()
        if raw == b'':
            return
        unpacker = msgpack.Unpacker(raw=True)
        unpacker.feed(raw)
        # Replay the atom stream in reverse: stop at the most recent milestone.
        for i, elem in enumerate(reversed(list(unpacker))):
            if elem[0]:
                # Is a milestone
                # TODO: Update our known milestoneIndex, if we find one
                # might have to rewrite .fetch() and merge it here...
                self.milestoneIndex = self.getChunkLen()
                self._applyMilestone(elem[1])
                break
            else:
                name = elem[1].decode()
                if name in self.inodes:
                    atom = Atom(False, elem[2], name)
                    self.inodes[name].applyAtom(atom)
                else:
                    # new inode
                    type = ["dir", "file"][elem[2][b't']]
                    self.inodes[name] = Inode(name, self.iotaApi, self, type)
                    atom = Atom(False, elem[2], name)
                    self.inodes[name].applyAtom(atom)

    def _getSkipChunks(self):
        return

    def _applyMilestone(self, milestone) -> None:
        # TODO: still incomplete -- Atom has no load() and the stones are not
        # turned back into Inode objects (see the milestone-format TODO at the top).
        self.inodes = {}
        for stone in milestone:
            atom = Atom()
            atom.load(stone)
            self.inodes[atom.name] = atom

    def _applyAtom(self, atom: Atom) -> None:
        if atom.name in self.inodes:
            self.inodes[atom.name].applyAtom(atom)
        else:
            cont = atom.cont
            type = ["dir", "file"][cont[b't']]
            inode = Inode(atom.name, self.iotaApi, self, type)
            self.inodes[atom.name] = inode

    def _newAtom(self, atom: Atom) -> None:
        self.append(atom.dump())

    def getNameList(self):
        self._requireFetched()
        return list(self.inodes.keys())

    def _tree(self):
        self._requireFetched()
        dirs = {}
        files = []
        for inode in self.inodes:
            if self.inodes[inode].type == "file":
                files.append(inode)
            elif self.inodes[inode].type == "dir":
                dirs[inode] = self.inodes[inode].getRef()._tree()
            else:
                files.append("[" + str(self.inodes[inode].type) + "]/" + inode)
        lines = ["{" + self.name + "}"]
        for i, d in enumerate(dirs):
            dir = dirs[d]
            if len(files) == 0 and i == len(dirs) - 1:
                lines.append(" └──" + dir[0])
                for l in range(len(dir) - 1):
                    lines.append(" " + dir[l + 1])
            else:
                lines.append(" ├──" + dir[0])
                for l in range(len(dir) - 1):
                    lines.append(" │ " + dir[l + 1])
        if len(files):
            for f in range(len(files) - 1):
                lines.append(" ├──" + files[f])
            lines.append(" └──" + files[-1])
        return lines

    def tree(self):
        return "\n".join(self._tree())

    def getInode(self, name: str) -> "Inode":
        self._requireFetched()
        return self.inodes[name]

    def mkdir(self, name: str) -> bool:
        self._requireFetched()
        if name in self.getNameList():
            return False
        inode = Inode(name, self.iotaApi, self, "dir")
        atom = inode.change(milestoneIndex=0)
        self._newAtom(atom)
        self.inodes[name] = inode
        return True

    def mkfile(self, name: str) -> bool:
        self._requireFetched()
        if name in self.getNameList():
            return False
        inode = Inode(name, self.iotaApi, self, "file")
        atom = inode.change(size=0, milestoneIndex=0, hash=b'NULL')
        self._newAtom(atom)
        self.inodes[name] = inode
        return True

    def _updateFileSize(self, name: str, size: int) -> None:
        self._requireFetched()
        atom = self.inodes[name].change(size=size)
        self._newAtom(atom)

    def _updateFileHash(self, name: str, hash: bytes, size: int) -> None:
        log("New FileToken for file '" + name + "' registered")
        self._requireFetched()
        atom = self.inodes[name].change(size=size, hash=hash)
        self._newAtom(atom)

    def performMilestone(self) -> None:
        if isinstance(self.parent, bytes):
            raise Exception("Cannot create a milestone of the genesis block!")
        stones = {}
        for a in self.inodes:
            stones[a] = self.inodes[a].toStone()
        self.atomStack = 0
        milestoneAtom = Atom(True, stones)
        data = milestoneAtom.dump()
        self.milestoneIndex = self.getChunkLen()
        if self.parent != None:
            self.parent._updateChildMilestone(self.name, self.milestoneIndex)
        self.append(data, True)

    def _updateChildMilestone(self, name: str, milestoneIndex: int):
        atom = self.inodes[name].change(milestoneIndex=milestoneIndex)
        if isinstance(self.parent, bytes):
            # We are the genesis block: record the new index directly in our own blob.
            self.milestoneIndex = milestoneIndex
            self.append(msgpack.dumps(milestoneIndex))
        else:
            self._newAtom(atom)

    def close(self) -> None:
        for i in self.inodes:
            inode = self.inodes[i]
            if inode.hasRef():
                ref = inode.getRef()
                ref.close()
        self._close()

class TangleFile():
    def __init__(self, name: str, parent: TangleFileTreeElement, iotaApi: Iota) -> None:
        self.api = iotaApi
        self.name = name
        self.parent = parent
        self.reflexiveInode = parent.inodes[name]
        self.size = self.reflexiveInode.size
        self.hash = self.reflexiveInode.hash
        self.token = hashlib.sha3_384(b'f' + parent.token + self.hash).digest()
        self.blob = TangleBlob(self.token, iotaApi)

    def write(self, offset: int, data: bytes):
        if offset == self.size:
            # Plain append: extend the existing blob in place.
            self.blob.append(data)
            self.size = self.blob.getSize()
            self.parent._updateFileSize(self.name, self.size)
        else:
            # Overwrite: re-hash the content and start a fresh blob under a new token.
            oldData = self.blob.read()
            newData = oldData[:offset] + data + oldData[offset + len(data):]
            self.hash = hashlib.sha256(newData).digest()
            self.token = hashlib.sha3_384(b'f' + self.parent.token + self.hash).digest()
            self.blob = TangleBlob(self.token, self.api)
            self.blob.append(newData)
            self.size = self.blob.getSize()
            self.parent._updateFileHash(self.name, self.hash, self.size)

    def close(self):
        self.blob._close()

class Inode():
    # Directory-entry bookkeeping for one child (file or dir) of a
    # TangleFileTreeElement; lazily resolves to a TangleFile or TangleFileTreeElement.
    def __init__(self, name: str, iotaApi: Iota, parent: TangleFileTreeElement = None, type: str = None) -> None:
        self.parent = parent
        self.name = name
        self.type = type
        self.ref = None
        self.iotaApi = iotaApi

    def setType(self, type: str) -> None:
        self.type = type

    def change(self, size: int = None, hash: bytes = None, milestoneIndex: int = None) -> Atom:
        delta = {}
        delta[b't'] = (self.type == "file")
        if size != None:
            self.size = size
            delta[b's'] = size
        if hash != None:
            self.hash = hash
            delta[b'h'] = hash
        if milestoneIndex != None:
            self.milestoneIndex = milestoneIndex
            delta[b'm'] = milestoneIndex
        return Atom(False, delta, self.name)

    def applyAtom(self, atom: Atom) -> None:
        if atom.name != self.name:
            raise Exception("Cannot apply atom meant for a different inode (names differ)")
        if atom.milestone:
            stones = atom.cont
            if self.name in stones:
                self.applyAtom(Atom(False, stones[self.name], self.name))
        else:
            if (self.type == "file") != atom.cont[b't']:
                raise Exception("I am a " + self.type + "; this atom is for the other type")
            delta = atom.cont
            if b's' in delta:
                self.size = delta[b's']
            if b'h' in delta:
                self.hash = delta[b'h']
            if b'm' in delta:
                self.milestoneIndex = delta[b'm']

    def toStone(self) -> list:
        if self.type == "file":
            return [self.size, self.hash]
        else:
            return [self.milestoneIndex]

    def getRef(self):
        if self.name == "*":
            return None
        if not self.ref:
            if self.type == "dir":
                self.ref = TangleFileTreeElement(self.name, self.milestoneIndex, self.parent, self.iotaApi)
            elif self.type == "file":
                self.ref = TangleFile(self.name, self.parent, self.iotaApi)
            else:
                raise Exception("Cannot get reference of an inode of type " + str(self.type))
        return self.ref

    def hasRef(self):
        return self.ref is not None
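
# Illustrative sketch (guarded so it never runs) of the delta round-trip between
# Inode.change() and Inode.applyAtom(); the names and values are made up.
if False:
    src = Inode("a.txt", None, None, "file")
    atom = src.change(size=10, hash=b'0123', milestoneIndex=0)
    # atom.cont == {b't': True, b's': 10, b'h': b'0123', b'm': 0}
    dst = Inode("a.txt", None, None, "file")
    dst.applyAtom(atom)
    # dst.size == 10, dst.hash == b'0123', dst.milestoneIndex == 0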

class IotaFS(pyfuse3.Operations):

    #supports_dot_lookup = True
    enable_writeback_cache = True

    def __init__(self, token) -> None:
        self.api = Iota('https://nodes.thetangle.org:443', local_pow=True)
        # TODO Cache last known milestone-Index of genesis locally
        self.genesis = TangleFileTreeElement("*", 0, token, self.api)
        log("Fetching Genesis...")
        self.genesis.fetch()
        log("Retrieving reference to root")
        if self.genesis.getNameList() != ["/"]:
            if len(self.genesis.getNameList()):
                # there's another directory in our genesis chain... WTF?!
                raise Exception("Corrupted Genesis-Chain: "
                    + "expected only the root directory '/', found: " + str(self.genesis.getNameList()))
            else:
                # we don't have a root yet, let's create one...
                log("Unable to find reference to root: Creating new root")
                self.genesis.mkdir("/")
        log("Successfully Mounted!")
        self.inodeIds = {}
        self.fhs = {}

    async def access(self, inodeId, mode, ctx):
        # not called
        raise Exception("This function should not have been called")

    async def create(self, parent_inodeId, name, mode, flags, ctx):
        #return (fi, attr)
        # $increase lookupN
        pass

    async def flush(self, fh):
        # flush the file at fh
        # basically means: close, but may be called multiple times
        # when open multiple times with same fh
        pass

    async def forget(self, inode_list):
        # inode_list = [(fh, nlookup), ...]
        # decrement lookupN of file at fh
        # if lookupN == 0:
        #     'remove' Inode
        # should be called at unmount to bring lookupN to 0 for all files
        pass

    async def getattr(self, inodeId, ctx):
        # return EntryAttributes()
        pass
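
    # A minimal getattr sketch (commented out; it assumes self.inodeIds maps a FUSE
    # inode number to an Inode object, which is not implemented yet):
    #
    #   inode = self.inodeIds[inodeId]
    #   entry = pyfuse3.EntryAttributes()
    #   entry.st_ino = inodeId
    #   if inode.type == "dir":
    #       entry.st_mode = stat.S_IFDIR | 0o755
    #       entry.st_size = 0
    #   else:
    #       entry.st_mode = stat.S_IFREG | 0o644
    #       entry.st_size = inode.size
    #   entry.st_atime_ns = entry.st_mtime_ns = entry.st_ctime_ns = 0
    #   return entry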

    #async def link(self, inodeId, new_parent_inode, new_name, ctx):

    async def lookup(self, parent_inodeId, name, ctx):
        #return EntryAttributes()
        # not exists: raise FUSEError(errno.ENOENT)
        # must handle .. and .
        # $increase lookupN
        pass

    async def mkdir(self, parent_inodeId, name, mode, ctx):
        #return EntryAttributes()
        # $increase lookupN
        pass

    async def mknod(self, parent_inodeId, name, mode, rdev, ctx):
        # create file
        #return EntryAttributes()
        # $increase lookupN
        pass

    async def open(self, inodeId, flags, ctx):
        # open file at inodeId; give back fh
        #return FileInfo(..fh)
        pass

    async def opendir(self, inodeId, ctx):
        #return fh
        pass

    async def read(self, fh, off, size):
        # Read size bytes from fh at position off
        pass

    async def readdir(self, fh, start_id, token):
        # see http://www.rath.org/pyfuse3-docs/operations.html#pyfuse3.Operations.readdir
        pass
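
    # A minimal readdir sketch (commented out; it assumes self.fhs maps a directory
    # handle to a TangleFileTreeElement and that attributes are built as in getattr):
    #
    #   dirElem = self.fhs[fh]
    #   names = dirElem.getNameList()
    #   for idx in range(start_id, len(names)):
    #       attrs = ...  # EntryAttributes for names[idx]
    #       if not pyfuse3.readdir_reply(token, names[idx].encode(), attrs, idx + 1):
    #           break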

    async def release(self, fh):
        # file no longer open -> close? uncache?
        pass

    async def releasedir(self, fh):
        # dir no longer open -> uncache?
        pass


#api = Iota('https://nodes.thetangle.org:443', local_pow=True)
#token = b'testToken'
#genesis = TangleFileTreeElement("*", 0, token, api)

if False:
    genesis.mkdir("/")
    root = genesis.inodes["/"].getRef()

    root.mkdir("dir1")
    root.mkdir("dir2")
    root.mkdir("dir3")
    root.mkdir("dir4")
    root.mkfile("file.txt")
    d2 = root.inodes["dir2"].getRef()
    d2.mkfile("a.txt")
    d3 = root.inodes["dir3"].getRef()
    d3.mkfile("b.txt")
    d3.mkfile("c.txt")
    d3.mkfile("d.txt")
    d3.mkdir("subDir")
    d4 = root.inodes["dir4"].getRef()
    d4.mkdir("sub")
    sub = d4.inodes["sub"].getRef()
    sub.mkdir("subsub")

    print(genesis.tree())

def main():
    iotaFs = IotaFS(b'This is a test token')
    opts = set(pyfuse3.default_options)
    opts.add('fsname=IotaFS')
    #opts.add('debug')
    pyfuse3.init(iotaFs, "mount", opts)

    try:
        trio.run(pyfuse3.main)
    except:
        pyfuse3.close(unmount=True)
        raise

    pyfuse3.close()