Train using gradient

parent 06a0461e93
commit 98aaec1e22

caliGraph.py (81 changed lines)
@@ -373,7 +373,7 @@ def scoreOpinions(G, globMu, globStd):
 def scoreUnread(G, globMu, globStd):
     for n in list(G.nodes):
         feedbacks = [globMu]
-        ws = [getWeightForType('mu')]
+        ws = [['mu']]
         node = G.nodes[n]
         if node['t'] == 'book':
             if node['rating'] == None:
@@ -381,7 +381,7 @@ def scoreUnread(G, globMu, globStd):
                 for adj in adjacens:
                     adjNode = G.nodes[adj]
                     if 'score' in adjNode and adjNode['score'] != None:
-                        w = getWeightForType(adjNode['t'], G[n][adj]['weight'] if 'weight' in G[n][adj] else 1)
+                        w = [adjNode['t'], G[n][adj]['weight'] if 'weight' in G[n][adj] else 1]
                         for fb in adjNode['feedbacks']:
                             feedbacks.append(fb)
                             ws.append(w)
@@ -389,19 +389,20 @@ def scoreUnread(G, globMu, globStd):
                     node['mean'], node['std'] = norm.fit(feedbacks)
                     node['se'] = globStd / math.sqrt(len(feedbacks))
                     feedbacks.append(node['std'])
-                    ws.append(getWeightForType('sigma'))
+                    ws.append(['sigma'])
                     feedbacks.append(node['se'])
-                    ws.append(getWeightForType('se'))
+                    ws.append(['se'])
                     feedbacks.append(globMu)
-                    ws.append(getWeightForType('bias'))
-                    node['score'] = sum([fb*w for fb, w in zip(feedbacks, ws)])/sum(ws)
+                    ws.append(['bias'])
+                    node['score'] = sum([fb*getWeightForType(w[0], w[1] if len(w)>1 else 1) for fb, w in zip(feedbacks, ws)])/sum([getWeightForType(w[0], w[1] if len(w)>1 else 1) for w in ws])
+                    node['_act'] = feedbacks
+                    node['_wgh'] = ws
                 else:
                     node['score'] = globMu + errorFac*globStd + len(feedbacks)*0.0000000001
             if 'series' in node:
                 if node['series_index'] == 1.0:
                     node['score'] += 0.000000001
 
 # TODO: Make this neural and train it
 def getWeightForType(nodeType, edgeWeight=1):
     global weights
     w = weights[nodeType]
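
Note on the hunks above: the getWeightForType() calls move out of scoreUnread(). ws now stores raw (type, edge weight) pairs, and each node caches its inputs in _act/_wgh, so a score can be re-derived under a tweaked weight table without redoing the graph traversal. A toy illustration of that deferred-weighting idea (standalone names, not the repo's API):

# Sketch: recompute a cached node score under a modified weight table.
# 'acts' plays the role of node['_act'], 'wgh' the role of node['_wgh'];
# both names here are assumptions for illustration only.
def rescore(acts, wgh, weights):
    num = sum(a * weights[t] * ew for a, (t, ew) in zip(acts, wgh))
    den = sum(weights[t] * ew for t, ew in wgh)
    return num / den

weights = {'mu': 1.0, 'tag': 0.5, 'sigma': 0.8}
acts = [7.0, 8.5, 1.2]
wgh  = [('mu', 1), ('tag', 2), ('sigma', 1)]
print(rescore(acts, wgh, weights))  # weighted mean of the cached feedbacks
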
@@ -789,6 +790,9 @@ def evaluateFitness(books, debugPrint=False):
     boundsLoss = 0
     linSepLoss = []
     errSq = []
+    gradient = {}
+    for wt in weights:
+        gradient[wt] = 0
     mu, sigma = genScores(G, books)
     for b in G.nodes:
         if b in ratedBooks:
@@ -800,6 +804,9 @@ def evaluateFitness(books, debugPrint=False):
             else:
                 errSq.append((rating - G.nodes[b]['score'])**2)
             G.nodes[b]['rating'] = rating
+            for wt in weights:
+                scoreB = sum([a*(1.001 if wt==w[0] else 1)*weights[w[0]]*(w[1] if len(w)>1 else 1) for a,w in zip(G.nodes[b]['_act'], G.nodes[b]['_wgh'])])/sum([(1.001 if wt==w[0] else 1)*weights[w[0]]*(w[1] if len(w)>1 else 1) for w in G.nodes[b]['_wgh']])
+                gradient[wt] += ((rating - G.nodes[b]['score'])**2 - (rating - scoreB)**2)*1000
         if 'score' in G.nodes[b] and G.nodes[b]['score'] != None:
             score = G.nodes[b]['score']
             if score > 10.0:
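
The gradient accumulated here is a finite-difference estimate: for each weight type wt, scoreB re-evaluates the cached score with that one weight scaled by 1.001, and the resulting change in squared error (scaled by a constant 1000 and averaged over books further down) stands in for the partial derivative. In isolation the idea looks like this (toy code, not the repo's API; the diff multiplies by a constant rather than dividing by the step size):

# Sketch of a finite-difference gradient: nudge one parameter by a
# relative factor (1.001, as in the diff) and compare losses.
def fd_gradient(loss, params, rel_step=1.001):
    grad = {}
    base = loss(params)
    for k in params:
        nudged = dict(params)
        nudged[k] = params[k] * rel_step
        # positive when increasing params[k] lowers the loss
        grad[k] = (base - loss(nudged)) / (params[k] * (rel_step - 1))
    return grad

# Example: minimize (w - 3)^2 for a single weight 'w'.
params = {'w': 1.0}
for _ in range(50):
    g = fd_gradient(lambda p: (p['w'] - 3.0)**2, params)
    params['w'] += 0.1 * g['w']   # step along the error-reduction direction
print(round(params['w'], 3))      # ~3.0
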
@@ -809,49 +816,45 @@ def evaluateFitness(books, debugPrint=False):
             # reward seperation linearly
             linSepLoss.append(abs(score - mu))
     regressionLoss = sum([(1-w)**2 for w in weights.values()])
+    for g in gradient:
+        gradient[g] /= len(errSq)
     if debugPrint:
         print(sum(errSq)/len(errSq), 0.005*regressionLoss, 0.2*boundsLoss/len(ratedBooks), 1.0*sum(linSepLoss)/len(linSepLoss))
-    return sum(errSq)/len(errSq) + 0.005*regressionLoss + 0.2*boundsLoss/len(ratedBooks) - 1.0*sum(linSepLoss)/len(linSepLoss)
+    fit = sum(errSq)/len(errSq) + 0.005*regressionLoss + 0.2*boundsLoss/len(ratedBooks) - 1.0*sum(linSepLoss)/len(linSepLoss)
+    return fit, gradient
 
-def train(gamma = 1, maxEmptySteps=-1):
+def train(gamma = 1, full=True):
     global weights
+    if full:
+        for wt in weights:
+            weights[wt] = random.random()
     books = loadBooksFromDB()
     bestWeights = copy.copy(weights)
-    best_mse = evaluateFitness(books)
-    w = list(weights.keys())
-    attr = random.choice(w)
-    delta = gamma * (-0.5 + (0.75 + 0.25*random.random()))
-    emptyStepsLeft = maxEmptySteps
+    mse, gradient = evaluateFitness(books)
+    best_mse = mse
+    delta = 1
+    stagLen = 0
 
-    while gamma > 1.0e-08:
-        print({'mse': best_mse, 'w': weights, 'gamma': gamma})
-        weights = copy.copy(bestWeights)
-        if gamma < 0.01:
-            while random.random() < 0.5:
-                attr = random.choice(w)
-                weights[attr] = -0.1+random.random()*1.5+random.random()
+    while gamma > 1.0e-06 and delta > 1.0e-06:
+        last_mse = mse
+        print({'mse': mse, 'w': weights, 'gamma': gamma, 'delta': delta})
+        print(gradient)
+        delta = sum(gradient[g]**2 for g in gradient)
+        for wt in weights:
+            weights[wt] += gamma*gradient[wt]
+        mse, gradient = evaluateFitness(books)
+        if mse < last_mse:
+            gamma = gamma*1.25
         else:
-            weights[attr] += delta
-        if attr not in ['sigma', 'mu', 'se']:
-            weights[attr] = min(max(0.0, weights[attr]), 5.0)
-        mse = evaluateFitness(books)
+            gamma *= 0.25
         if mse < best_mse:
             saveWeights(weights)
-            gamma = min(max(gamma*1.75, 0.01), 1)
             bestWeights = copy.copy(weights)
             best_mse = mse
-            delta *= 2
-            if random.random() < 0.10:
-                attr = random.choice(w)
-            emptyStepsLeft = maxEmptySteps
-        else:
-            weights = copy.copy(bestWeights)
-            gamma *= 0.8
-            attr = random.choice(w)
-            delta = gamma * (-0.5 + (0.75 + 0.25*random.random()))
-            emptyStepsLeft -= 1
-            if emptyStepsLeft == 0:
-                return
+        stagLen += 1
+        if stagLen == 3 or mse > 100:
+            for wt in weights:
+                weights[wt] = random.random()
 
 def saveWeights(weights):
     with open('neuralWeights.json', 'w') as f:
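
The rewritten train() replaces the old random-restart hill climbing with gradient steps plus a multiplicative step-size rule: gamma grows by 1.25x after an improving step and is cut to a quarter after a worsening one (a "bold driver" style heuristic), stopping once the step size or the squared gradient norm (delta) vanishes, with re-randomization when progress stalls (stagLen) or the loss blows up. A self-contained toy version of that loop, minus the restarts and checkpointing:

# Sketch of the step-size heuristic in the new train() loop; standalone
# toy code, names are illustrative.
def bold_driver(loss, grad, params, gamma=1.0):
    # grow the rate after an improving step, cut it hard after a regression;
    # stop when the step size or the squared gradient norm becomes tiny
    last, delta = loss(params), 1.0
    while gamma > 1e-6 and delta > 1e-6:
        g = grad(params)
        delta = sum(v * v for v in g.values())
        for k in params:
            params[k] += gamma * g[k]
        cur = loss(params)
        gamma = gamma * 1.25 if cur < last else gamma * 0.25
        last = cur
    return params

loss = lambda p: (p['w'] - 3.0)**2
grad = lambda p: {'w': -2.0 * (p['w'] - 3.0)}  # points downhill on the loss
print(bold_driver(loss, grad, {'w': 0.0}))     # 'w' ends near 3.0
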
@@ -900,7 +903,7 @@ def cliInterface():
     args = parser.parse_args()
 
     if args.cmd=="train":
-        train(args.g, -1 if args.full else 32)
+        train(args.g, args.full)
         exit()
 
     G, books = buildFullGraph()
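
The CLI now forwards args.full directly, since train()'s second parameter changed from a step budget (maxEmptySteps) to a "randomize all weights first" flag. A minimal sketch of the argument wiring this implies (only cmd, -g and --full are evidenced by the diff; defaults and help texts are guesses):

import argparse

# Hypothetical reconstruction of the relevant part of cliInterface();
# only 'cmd', '-g' and '--full' appear in the diff, the rest is assumed.
parser = argparse.ArgumentParser()
parser.add_argument('cmd', help='subcommand, e.g. train')
parser.add_argument('-g', type=float, default=1.0,
                    help='initial learning rate (gamma) passed to train()')
parser.add_argument('--full', action='store_true',
                    help='start from fresh random weights instead of the saved ones')
args = parser.parse_args()
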
neuralWeights.json

@@ -1 +1 @@
-{"topList": 4.173476109103811, "recommender": 2.204400674657485, "author": 4.2062120384245425, "series": 0.086990081240546, "tag": 0.2896280973944825, "mu": 2.0656384571269544, "sigma": 0.25275483246754604, "se": 3.20651748273233, "bias": 1.2331776397132}
+{"topList": 4.605030088443722, "recommender": 3.3234411783311066, "author": 4.6589079802163855, "series": 0.6203580388042937, "tag": 0.6051529346000674, "mu": 1.3820384196729414, "sigma": 2.059654503148757, "se": 3.25936622066864, "bias": 1.9033827699091248}
neuralWeights.json.bak (new file, 1 line)

@@ -0,0 +1 @@
+{"topList": 4.173476109103811, "recommender": 2.204400674657485, "author": 4.2062120384245425, "series": 0.086990081240546, "tag": 0.2896280973944825, "mu": 2.0656384571269544, "sigma": 0.25275483246754604, "se": 3.20651748273233, "bias": 1.2331776397132}
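
neuralWeights.json holds the live weight table keyed by feedback type, and the new neuralWeights.json.bak preserves the pre-training snapshot. A sketch of how a loader might use the backup (loadWeights is hypothetical, not a function from this commit):

import json, os

# Sketch: load the weight table, falling back to the .bak snapshot if the
# primary file is missing; loadWeights is an assumed helper name.
def loadWeights(path='neuralWeights.json'):
    for candidate in (path, path + '.bak'):
        if os.path.exists(candidate):
            with open(candidate) as f:
                return json.load(f)
    # fall back to neutral weights for every known feedback type
    return {t: 1.0 for t in ['topList', 'recommender', 'author',
                             'series', 'tag', 'mu', 'sigma', 'se', 'bias']}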