From a355c3105d56f0339995befd825d768c9992af45 Mon Sep 17 00:00:00 2001
From: Dominik Roth
Date: Sun, 3 Oct 2021 13:12:26 +0200
Subject: [PATCH] Initial commit

---
 .gitignore  |  1 +
 README.md   |  3 +++
 collapse.py | 43 +++++++++++++++++++++++++++++++++
 diySym.py   | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 121 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 collapse.py
 create mode 100644 diySym.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..bee8a64
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..dd50ca5
--- /dev/null
+++ b/README.md
@@ -0,0 +1,3 @@
+# Project Collapse
+Basically, I'm just playing around with the idea of solving neural networks for their minimal loss in closed form (without iteration).
+I do realize that this is impossible (because it violates physics), but I want to build a better, more intuitive understanding of why it is impossible from a practical, information-theoretic perspective.
diff --git a/collapse.py b/collapse.py
new file mode 100644
index 0000000..eec0ca6
--- /dev/null
+++ b/collapse.py
@@ -0,0 +1,43 @@
+from sympy import symbols, exp
+
+class Model():
+    def __init__(self):
+        self.layers = []
+
+    def append(self, layer):
+        if len(self.layers) != 0:
+            layer._connect(self.layers[-1])
+        self.layers.append(layer)
+
+    def collapse(self):
+        # Symbolic inputs x0..xn for the input layer; seq=True keeps the
+        # result a tuple even when the input layer has a single neuron.
+        act = symbols(' '.join(['x' + str(i) for i in range(self.layers[0].length)]), seq=True)
+        for layer in self.layers[1:]:
+            act = layer.activate(act)
+        return list(act)
+
+class Layer():
+    def __init__(self, length):
+        self.length = length
+        self.layNum = 0
+
+    def _connect(self, prev):
+        self.prev = prev
+        self.weights = []
+        self.layNum = self.prev.layNum + 1
+        # One bias weight plus one weight per input neuron, for every neuron.
+        for w in range(self.length * (self.prev.length + 1)):
+            self.weights.append(symbols('w' + str(self.layNum) + '_' + str(w)))
+
+    def activate(self, inp):
+        act = []
+        for neuron in range(self.length):
+            # Bias term, pre-scaled so the averaging below leaves it unscaled.
+            accum = self.weights[(self.prev.length + 1) * neuron] * self.prev.length
+            for iNeuron in range(self.prev.length):
+                accum += inp[iNeuron] * self.weights[(self.prev.length + 1) * neuron + iNeuron + 1]
+            # Sigmoid over the input-averaged pre-activation.
+            a = 1 / (1 + exp(-accum / self.prev.length))
+            act.append(a)
+        return act
diff --git a/diySym.py b/diySym.py
new file mode 100644
index 0000000..c35fdb1
--- /dev/null
+++ b/diySym.py
@@ -0,0 +1,74 @@
+# Minimal hand-rolled symbolic tracer: the operator overloads build an
+# expression tree that can be pretty-printed and evaluated on demand.
+
+class Tracer():
+    def __init__(self):
+        raise Exception("Use BaseTracer instead of Tracer")
+
+    def _push(self, op, obj):
+        return CompTracer(self, obj, op)
+
+    def __add__(self, other):
+        return self._push("+", other)
+
+    def __sub__(self, other):
+        return self._push("-", other)
+
+    def __mul__(self, other):
+        return self._push("*", other)
+
+    def __truediv__(self, other):
+        return self._push("/", other)
+
+    def __pow__(self, other):
+        return self._push("**", other)
+
+    def __pos__(self):
+        return self
+
+    def __neg__(self):
+        return self._push("*", -1)
+
+    def __repr__(self):
+        v = self.eval()
+        return "<" + str(self) + " = " + str(v) + ">"
+
+class BaseTracer(Tracer):
+    def __init__(self, name, val=None):
+        self.name = name
+        self.val = val
+
+    def eval(self):
+        return self.val
+
+    def __str__(self):
+        return self.name
+
+class CompTracer(Tracer):
+    def __init__(self, lParent, rParent, op):
+        # Wrap plain constants so both parents are Tracers.
+        if not isinstance(lParent, Tracer):
+            lParent = BaseTracer(repr(lParent), lParent)
+        if not isinstance(rParent, Tracer):
+            rParent = BaseTracer(repr(rParent), rParent)
+        self.parents = [lParent, rParent]
+        self.op = op
+
+    def eval(self):
+        l = self.parents[0].eval()
+        r = self.parents[1].eval()
+        if None in [l, r]:
+            return None
+        if self.op == "+":
+            return l + r
+        elif self.op == "-":
+            return l - r
+        elif self.op == "*":
+            return l * r
+        elif self.op == "/":
+            return l / r
+        elif self.op == "**":
+            return l ** r
+
+    def __str__(self):
+        return "(" + str(self.parents[0]) + " " + self.op + " " + str(self.parents[1]) + ")"
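
A minimal usage sketch of the two modules above, assuming sympy is installed and the snippet runs next to collapse.py and diySym.py; the 2-3-1 layer sizes are an arbitrary illustration:

    from collapse import Model, Layer
    from diySym import BaseTracer

    # Collapse an arbitrary 2-3-1 network into closed-form sympy
    # expressions over the inputs x* and the weights w*_*.
    model = Model()
    model.append(Layer(2))  # input layer: symbols x0, x1
    model.append(Layer(3))  # hidden layer: weights w1_0 .. w1_8
    model.append(Layer(1))  # output layer: weights w2_0 .. w2_3
    for expr in model.collapse():
        print(expr)

    # The DIY tracer records arithmetic as an expression tree
    # and can evaluate it afterwards.
    x = BaseTracer('x', 3)
    y = (x + 2) * x
    print(str(y))    # ((x + 2) * x)
    print(y.eval())  # 15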