From 1b4b6f1a2f64b28c0bcac82c376126b2bca6b554 Mon Sep 17 00:00:00 2001
From: Dominik Roth
Date: Wed, 22 Sep 2021 10:28:08 +0200
Subject: [PATCH] Updated README and better epoch-printing

---
 README.md | 1 +
 train.py  | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ee7a1b4..1b0fd16 100644
--- a/README.md
+++ b/README.md
@@ -6,3 +6,4 @@ I made it because I want to try to break it.
 This will work iff I succeed in building a PPT-discriminator for sha256 from randomness.
 As my first approach, this discriminator will be based on an LSTM network.
 Update: This worked out way better than expected; given long enough sequences (128 bytes are more than enough) we can discriminate successfully in 100% of cases.
+Update: I did an oopsie in the training code and the discriminator is actually shit.
diff --git a/train.py b/train.py
index ec8ae83..4bb2ac8 100644
--- a/train.py
+++ b/train.py
@@ -4,6 +4,7 @@ from torch import nn, optim
 from torch.utils.data import DataLoader
 import numpy as np
 import random
+import math
 
 import shark
 
@@ -34,7 +35,7 @@ def train(model, seq_len=16*64):
     tid = str(int(random.random()*99999)).zfill(5)
     print("[i] I am "+str(tid))
     ltLoss = 50
-    lltLoss = 51
+    lltLoss = 52
     model.train()
     criterion = nn.BCELoss()
 
@@ -67,7 +68,7 @@ def train(model, seq_len=16*64):
             correct[t] = round(y_pred.item()) == t
         ltLoss = ltLoss*0.9 + 0.1*loss.item()
         lltLoss = lltLoss*0.9 + 0.1*ltLoss
-        print({ 'epoch': epoch, 'loss': loss.item(), 'ltLoss': ltLoss, 'ok0': correct[0], 'ok1': correct[1], 'succ': correct[0] and correct[1] })
+        print({ 'epoch': epoch, 'loss': loss.item(), 'ltLoss': ltLoss, 'ok0': correct[0], 'ok1': correct[1], 'succ': correct[0] and correct[1], 'acc': str(int(max(0, 1-math.sqrt(lltLoss))*100))+"%" })
 
         if epoch % 8 == 0:
             torch.save(model.state_dict(), 'model_savepoints/'+tid+'_'+str(epoch)+'.n')
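
Note on the new 'acc' field: the patch keeps two exponential moving averages (ltLoss smooths the raw BCE loss, lltLoss smooths ltLoss again) and maps the doubly smoothed value to a rough percentage via 1 - sqrt(loss). The sketch below is a minimal standalone illustration of that bookkeeping, not code from the patch; the helper names (ema, estimate_accuracy) and the sample loss values are made up, while the 0.9/0.1 smoothing constants, the starting values 50/52, and the 1 - sqrt(lltLoss) mapping mirror the diff.

import math

def ema(prev, new, alpha=0.1):
    # Exponential moving average: keep 90% of history, blend in 10% new.
    return prev * (1 - alpha) + new * alpha

def estimate_accuracy(lltLoss):
    # The patch's heuristic: map the doubly smoothed BCE loss to a rough
    # accuracy percentage. Cosmetic readout, not a measured accuracy.
    return str(int(max(0, 1 - math.sqrt(lltLoss)) * 100)) + "%"

# Mirrors the training loop's bookkeeping: ltLoss smooths the raw loss,
# lltLoss smooths ltLoss again, so the printed 'acc' reacts slowly to
# per-epoch noise. Loss values here are invented for demonstration.
ltLoss, lltLoss = 50, 52
for loss in [0.9, 0.7, 0.5, 0.3, 0.2]:
    ltLoss = ema(ltLoss, loss)
    lltLoss = ema(lltLoss, ltLoss)
    print({'loss': loss, 'ltLoss': round(ltLoss, 3), 'acc': estimate_accuracy(lltLoss)})

Because lltLoss starts at 52, sqrt(lltLoss) stays above 1 for the first many epochs, so 'acc' prints 0% until the smoothed loss drops below 1; that is presumably why the expression is clamped with max(0, ...).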