Successful discrimination; updated README
This commit is contained in:
parent
42ed2dd676
commit
30cc846c6f
@ -5,3 +5,4 @@ I made it because I want to try to break it.
|
|||||||
(Precisely: Show it does not provide semantic security, because it is not IND-CPA-secure)
|
(Precisely: Show it does not provide semantic security, because it is not IND-CPA-secure)
|
||||||
This will work iff I succeed in building a PPT-discriminator for sha256 from randomness
|
This will work iff I succeed in building a PPT-discriminator for sha256 from randomness
|
||||||
As my first approach this discriminator will be based on an LSTM-network.
|
As my first approach this discriminator will be based on an LSTM-network.
|
||||||
|
Update: This worked out way better than expected; given long enough sequences (128 bytes are more than enough) we can discriminate successfully in 100% of cases.
|
||||||
|
@ -60,8 +60,6 @@ def train(model, seq_len=16*64):
|
|||||||
ltLoss = ltLoss*0.9 + 0.1*loss.item()
|
ltLoss = ltLoss*0.9 + 0.1*loss.item()
|
||||||
lltLoss = lltLoss*0.9 + 0.1*ltLoss
|
lltLoss = lltLoss*0.9 + 0.1*ltLoss
|
||||||
print({ 'epoch': epoch, 'loss': loss.item(), 'ltLoss': ltLoss, 'correct?': correct })
|
print({ 'epoch': epoch, 'loss': loss.item(), 'ltLoss': ltLoss, 'correct?': correct })
|
||||||
if ltLoss < 0.20 and lltLoss < 0.225:
|
|
||||||
print("[*] Hell Yeah! Poccing! Got sup")
|
|
||||||
if epoch % 8 == 0:
|
if epoch % 8 == 0:
|
||||||
torch.save(model.state_dict(), 'model_savepoints/'+tid+'_'+str(epoch)+'.n')
|
torch.save(model.state_dict(), 'model_savepoints/'+tid+'_'+str(epoch)+'.n')
|
||||||
|
|
||||||
|
BIN
wh_discriminator.n
Normal file
BIN
wh_discriminator.n
Normal file
Binary file not shown.
Loading…
Reference in New Issue
Block a user