Stronger linear separation

Dominik Moritz Roth 2021-09-26 14:31:00 +02:00
parent 147a78302f
commit 9126bbcc14


@@ -545,8 +545,8 @@ def addScoreToLabels(G):
         if 'rating' in node and node['rating'] != None:
             node['label'] += " ("+str(node['rating'])+")"
         else:
-            if 'score' in node and node['score'] != None:
-                node['label'] += " (~{:.2f}±{:.1f})".format(node['score'], node['se'] if 'se' in node else 0.0)
+            if 'score' in node and node['score'] != None and 'se' in node:
+                node['label'] += " (~{:.2f}±{:.1f})".format(node['score'], node['se'])
             else:
                 node['label'] += " (~0±∞)"
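
For illustration, a minimal standalone sketch of the new labeling rule (the label_suffix helper and the sample node dicts are hypothetical, not part of the repository): a node now only gets a numeric "(~score±se)" suffix when both 'score' and 'se' are present; a node with a score but no standard error falls back to the "(~0±∞)" placeholder instead of printing a misleading "±0.0".

# Sketch only: mirrors the new condition with made-up node dicts.
def label_suffix(node):
    if 'score' in node and node['score'] is not None and 'se' in node:
        return " (~{:.2f}±{:.1f})".format(node['score'], node['se'])
    return " (~0±∞)"

print(label_suffix({'score': 7.31, 'se': 0.4}))  # " (~7.31±0.4)"
print(label_suffix({'score': 7.31}))             # now " (~0±∞)", previously " (~7.31±0.0)"
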
@@ -775,7 +775,7 @@ def waveFlow(G, node, n, dist, menge, firstEdge=False):
         if node in bestlist or node in keeplist:
             waveFlow(G, node, m, dist, menge, firstEdge=firstEdge)
 
-def evaluateFitness(books):
+def evaluateFitness(books, debugPrint=False):
     global weights
     G = buildBookGraph(books)
     graphAddAuthors(G, books)
@@ -808,8 +808,9 @@ def evaluateFitness(books):
             # reward seperation linearly
            linSepLoss.append(abs(score - mu))
     regressionLoss = sum([(1-w)**2 for w in weights.values()])
-    print(sum(errSq)/len(errSq), 0.005*regressionLoss, 0.2*boundsLoss/len(ratedBooks), 0.5*sum(linSepLoss)/len(linSepLoss))
-    return sum(errSq)/len(errSq) + 0.005*regressionLoss + 0.2*boundsLoss/len(ratedBooks) - 0.5*sum(linSepLoss)/len(linSepLoss)
+    if debugPrint:
+        print(sum(errSq)/len(errSq), 0.005*regressionLoss, 0.2*boundsLoss/len(ratedBooks), 1.0*sum(linSepLoss)/len(linSepLoss))
+    return sum(errSq)/len(errSq) + 0.005*regressionLoss + 0.2*boundsLoss/len(ratedBooks) - 1.0*sum(linSepLoss)/len(linSepLoss)
 
 def train(gamma = 1):
     global weights
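
Put together, a hedged sketch of how the fitness value now decomposes (errSq, boundsLoss, ratedBooks and linSepLoss come from the parts of evaluateFitness not shown in this diff; the comment on the bounds term is an interpretation of its name, not confirmed by the source). The linear-separation term is subtracted, so raising its weight from 0.5 to 1.0 rewards scores that sit further from the mean mu more strongly.

# Sketch, not the repository's code: the four terms combined by evaluateFitness.
fitness = (
    sum(errSq) / len(errSq)                    # mean of errSq (squared prediction errors)
    + 0.005 * regressionLoss                   # sum((1-w)**2): keeps weights close to 1
    + 0.2 * boundsLoss / len(ratedBooks)       # presumably penalizes out-of-range scores
    - 1.0 * sum(linSepLoss) / len(linSepLoss)  # mean |score - mu|: the strengthened separation reward
)

# The new debugPrint flag prints the individual terms only on demand,
# e.g. (assuming a loaded books list): evaluateFitness(books, debugPrint=True)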