removeUselessSeries and stronger linear separation
parent 2bb2e15b73
commit f1c887275c
caliGraph.py (21 changed lines)
@@ -344,6 +344,12 @@ def removeUselessTags(G, minUnread=1):
         if foundUnread < minUnread:
             G.remove_node(n)
 
+def removeUselessSeries(G, minSco=0):
+    for n in list(G.nodes):
+        node = G.nodes[n]
+        if node['t'] == 'series':
+            if len(G.adj[n]) < 2 or node['score'] < minSco:
+                G.remove_node(n)
 
 def scoreOpinions(G, globMu, globStd):
     for n in list(G.nodes):
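A quick illustration of the new pass, as a minimal sketch assuming G is a networkx graph whose nodes carry the 't' and 'score' attributes used elsewhere in caliGraph (the toy graph and node names below are made up): a series node is dropped when it links fewer than two other nodes or scores below minSco.

import networkx as nx

def removeUselessSeries(G, minSco=0):
    # drop series nodes with fewer than two links or a score below the threshold
    for n in list(G.nodes):
        node = G.nodes[n]
        if node['t'] == 'series':
            if len(G.adj[n]) < 2 or node['score'] < minSco:
                G.remove_node(n)

G = nx.Graph()
G.add_node('s1', t='series', score=7.5)  # two links, good score -> kept
G.add_node('s2', t='series', score=2.0)  # one link, low score  -> removed
G.add_node('b1', t='book')
G.add_node('b2', t='book')
G.add_edges_from([('s1', 'b1'), ('s1', 'b2'), ('s2', 'b1')])

removeUselessSeries(G, minSco=5.0)
print(sorted(G.nodes))  # ['b1', 'b2', 's1']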
@@ -386,7 +392,8 @@ def scoreUnread(G, globMu, globStd):
             ws.append(getWeightForType('sigma'))
             feedbacks.append(node['se'])
             ws.append(getWeightForType('se'))
-            #node['score'] = sum([fb*w for fb, w in zip(feedbacks, weights)])/len(feedbacks)
+            feedbacks.append(globMu)
+            ws.append(getWeightForType('bias'))
             node['score'] = sum([fb*w for fb, w in zip(feedbacks, ws)])/sum(ws)
         else:
             node['score'] = globMu + errorFac*globStd + len(feedbacks)*0.0000000001
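Net effect of the two added lines: the global mean now enters the weighted average as one more feedback signal, weighted by the learned 'bias' weight, so books with few or weak signals get pulled toward globMu. A minimal sketch of that arithmetic (weighted_score and the numeric weights below are illustrative stand-ins, not caliGraph's API):

def weighted_score(feedbacks, ws, globMu, bias_weight):
    # same formula as scoreUnread: weighted mean of all feedback signals,
    # with the global mean appended as a 'bias' signal
    feedbacks = feedbacks + [globMu]
    ws = ws + [bias_weight]
    return sum(fb*w for fb, w in zip(feedbacks, ws)) / sum(ws)

print(weighted_score([9.0], [0.5], globMu=6.5, bias_weight=1.0))    # 7.33 - one weak signal, pulled toward the mean
print(weighted_score([9.0, 8.5, 9.2], [1.0, 1.0, 1.0], 6.5, 0.5))   # 8.56 - strong signals dominate the bias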
@@ -644,13 +651,17 @@ def recommendNBooks(G, mu, std, n, removeTopListsB=True, removeUselessRecommende
     pruneTags(G, 12)
     pruneAuthorCons(G, int(n/5))
     pruneRecommenders(G, 12 - min(5, n/20))
+    removeUselessSeries(G, mu)
+    removeUselessTags(G)
     if removeTopListsB:
         removeTopLists(G)
     removeDangling(G, alsoBooks=True)
     removeKeepBest(G, n+math.ceil(n/20)+3, maxDistForRead=1.5)
-    removeUselessReadBooks(G)
     removeEdge(G)
     removeKeepBest(G, n+1, maxDistForRead=1.25)
+    removeUselessSeries(G, mu)
+    removeUselessTags(G)
+    removeUselessReadBooks(G)
     removeKeepBest(G, n, maxDistForRead=1.25)
 
     scaleBooksByRating(G)
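The cleanup now runs in two places: once alongside the early pruning and again after the last removeKeepBest, because pruning books can strand a series at a single remaining link, so it only becomes removable on the second pass. A toy sketch of that effect (uselessSeries is a hypothetical helper that just reports what removeUselessSeries would delete):

import networkx as nx

def uselessSeries(G):
    # series nodes that removeUselessSeries would drop for lack of links
    return [n for n in G.nodes
            if G.nodes[n].get('t') == 'series' and len(G.adj[n]) < 2]

G = nx.Graph()
G.add_node('s', t='series', score=8.0)
G.add_node('kept', t='book')
G.add_node('weak', t='book')
G.add_edges_from([('s', 'kept'), ('s', 'weak')])

print(uselessSeries(G))  # [] - first pass keeps 's'
G.remove_node('weak')    # stands in for a book pruned by removeKeepBest
print(uselessSeries(G))  # ['s'] - the second pass can now drop it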
@@ -775,6 +786,7 @@ def evaluateFitness(books):
 
     ratedBooks = [n for n in list(G.nodes) if 'rating' in G.nodes[n] and G.nodes[n]['rating'] != None]
     boundsLoss = 0
+    linSepLoss = []
     errSq = []
     mu, sigma = genScores(G, books)
     for b in G.nodes:
@@ -794,9 +806,10 @@
         elif score < 0.0:
             boundsLoss += (score)**2
         # reward seperation linearly
-        boundsLoss -= abs(score - mu)/10
+        linSepLoss.append(abs(score - mu))
     regressionLoss = sum([(1-w)**2 for w in weights.values()])
-    return sum(errSq)/len(errSq) + regressionLoss/100 + boundsLoss/100
+    print(sum(errSq)/len(errSq), 0.005*regressionLoss, 0.2*boundsLoss/len(ratedBooks), 0.5*sum(linSepLoss)/len(linSepLoss))
+    return sum(errSq)/len(errSq) + 0.005*regressionLoss + 0.2*boundsLoss/len(ratedBooks) - 0.5*sum(linSepLoss)/len(linSepLoss)
 
 def train(gamma = 1):
     global weights
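The fitness returned by evaluateFitness is now the mean squared rating error, plus a small regression penalty keeping the learned weights near 1, plus a bounds penalty scaled by the number of rated books, minus a reward for spreading scores away from the mean (the stronger linear separation). A hypothetical standalone form with made-up numbers to show the direction of the effect:

def fitness(errSq, regressionLoss, boundsLoss, linSepLoss, nRated):
    # lower is better; variable names mirror evaluateFitness
    return (sum(errSq)/len(errSq)                   # mean squared rating error
            + 0.005*regressionLoss                  # keep learned weights near 1
            + 0.2*boundsLoss/nRated                 # penalise out-of-range scores
            - 0.5*sum(linSepLoss)/len(linSepLoss))  # reward spread around mu

print(fitness([1.0, 0.5], 0.3, 0.0, [0.2, 0.4], nRated=2))  # 0.6015
print(fitness([1.0, 0.5], 0.3, 0.0, [1.5, 2.5], nRated=2))  # -0.2485 (better separation, lower loss)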