Better divergence/stagnation detection and mitigation when training

This commit is contained in:
Dominik Moritz Roth 2021-10-05 18:08:32 +02:00
parent f5763fe7da
commit 7fd1f4fa3f

View File

@ -823,11 +823,12 @@ def evaluateFitness(books, debugPrint=False):
fit = sum(errSq)/len(errSq) + 0.005*regressionLoss + 0.2*boundsLoss/len(ratedBooks) - 1.0*sum(linSepLoss)/len(linSepLoss)
return fit, gradient
def train(gamma = 1, full=True):
def train(initGamma = 1, full=True):
global weights
if full:
for wt in weights:
weights[wt] = random.random()
gamma = initGamma
books = loadBooksFromDB()
bestWeights = copy.copy(weights)
mse, gradient = evaluateFitness(books)
@ -849,12 +850,22 @@ def train(gamma = 1, full=True):
if mse < best_mse:
saveWeights(weights)
bestWeights = copy.copy(weights)
else:
best_mse = mse
if mse > last_mse:
stagLen += 1
if stagLen == 3 or mse > 100:
stagLen = -2
for wt in weights:
weights[wt] = random.random()
else:
stagLen = 0
if stagLen == 4 or mse > 50:
print("#")
stagLen = 0
gamma = initGamma
if random.random() < 0.50:
for wt in weights:
weights[wt] = random.random()
else:
weights = copy.copy(bestWeights)
for wt in weights:
weights[wt] *= 0.975+0.05*random.random()
print('Done.')
def saveWeights(weights):