Simpler and faster training

This commit is contained in:
Dominik Moritz Roth 2022-02-07 19:57:03 +01:00
parent dfa2f89f29
commit 93230c4285

View File

@@ -1058,20 +1058,17 @@ def evaluateFitness(books, batchSize=-1, debugPrint=False):
for wt in weights:
scoreB = sum([a*(1.001 if wt==w[0] else 1)*weights[w[0]]*(w[1] if len(w)>1 else 1) for a,w in zip(G.nodes[b]['_act'], G.nodes[b]['_wgh'])])/sum([(1.001 if wt==w[0] else 1)*weights[w[0]]*(w[1] if len(w)>1 else 1) for w in G.nodes[b]['_wgh']])
gradient[wt] += ((rating - G.nodes[b]['score'])**2 - (rating - scoreB)**2)*1000
if 'score' in G.nodes[b] and G.nodes[b]['score'] != None:
score = G.nodes[b]['score']
if score > 10.0:
boundsLoss += (score - 10)**2
elif score < 0.0:
boundsLoss += (score)**2
# reward separation linearly
linSepLoss.append(abs(score - mu))
regressionLoss = sum([(1-w)**2 for w in weights.values()])
regressionLoss = sum([max(0,abs(w)-1)**2 for w in weights.values()]) # no punishment if w within -1 and 1
for wt in weights:
if abs(weights[wt]) > 1.0:
gradient[wt] -= weights[wt]*10
else:
gradient[wt] -= weights[wt]*1
for g in gradient:
gradient[g] /= len(errSq)
if debugPrint:
print(sum(errSq)/len(errSq), 0.001*regressionLoss, 0.1*boundsLoss/len(ratedBooks), 0.5*sum(linSepLoss)/len(linSepLoss))
fit = sum(errSq)/len(errSq) + 0.001*regressionLoss + 0.1*boundsLoss/len(ratedBooks) - 0.5*sum(linSepLoss)/len(linSepLoss)
print(sum(errSq)/len(errSq), 0.001*regressionLoss)
fit = sum(errSq)/len(errSq) + 0.001*regressionLoss
return fit, gradient
def train(initGamma, full=True):
@@ -1171,7 +1168,7 @@ def cliInterface():
p_show.add_argument('-d', type=float, default=2.1, help='depth of expansion')
p_train = cmds.add_parser('train', description="TODO", aliases=[])
p_train.add_argument('-g', type=float, default=0.1, help='learning rate gamma')
p_train.add_argument('-g', type=float, default=0.2, help='learning rate gamma')
p_train.add_argument('--full', action="store_true")
p_prog = cmds.add_parser('progress', description="TODO", aliases=[])