Commit 9147a91b authored by Ngocson's avatar Ngocson

Cleaning the 'Experiment' scripts

parent 11168278
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 14:00:12 2019
@author: ngocson
"""
import numpy as np
class NARMA:
    """NARMA-n (Nonlinear Auto-Regressive Moving Average) sequence generator.

    One step of the recurrence:
        y(t+1) = alpha*y(t) + beta*y(t)*sum(y(t-n+1..t))
                 + gamma*u(t-n+1)*u(t) + delta

    NOTE: the class indentation was lost in the committed file; it is
    reconstructed here with the exact same statements.
    """

    def __init__(s, n=10, alpha=0.3, beta=0.05, gamma=1.5, delta=0.1):
        # n: order of the system (length of the state/input memory).
        s.n = n
        s.alpha = alpha
        s.beta = beta
        s.gamma = gamma
        s.delta = delta
        # Start from a zero state with small random inputs in [-0.1, 0.1).
        s.reset(np.zeros(n), (np.random.rand(n) - 0.5) * 0.2)

    def reset(s, state, inputs):
        """Replace the state and input histories (each of length n)."""
        s.state = state
        s.inputs = inputs

    def step(s, u):
        """Advance the system one time step with input u."""
        yt = s.alpha * s.state[-1] \
            + s.beta * s.state[-1] * s.state.sum() \
            + s.gamma * s.inputs[0] * s.inputs[-1] \
            + s.delta
        # Shift both histories, appending the newest input/output.
        s.inputs = np.append(s.inputs[1:], u)
        s.state = np.append(s.state[1:], yt)

    def steps(s, U):
        """Advance one step per element of the input sequence U."""
        for u in U:
            s.step(u)

    def read(s):
        """Return the most recent output value y(t)."""
        return s.state[-1]

    def getTimeSerie(s, U):
        """Drive the system with U and return the output series as an array."""
        Y = []
        for u in U:
            s.step(u)
            Y.append(s.read())
        return np.array(Y)
\ No newline at end of file
import sys
sys.path.append('../../module/')
import csv  # was missing: csv.reader is used below (NameError at runtime)
import numpy as np
import matplotlib.pyplot as plt
from random import *
import Reservoir
'''
import scipy.sparse.linalg as splinalg
from scipy.sparse import csr_matrix
'''
# Experiment: one-step-ahead prediction of the Santa Fe laser series with an
# echo-state reservoir.
N = 6000 #Total number of points for the training
n = 100 #Number of points skipped to remove the influence of the initial state
n_show = 200
# Load the series (one ';'-separated row of values) and normalise to [0, 1].
with open('../../data/santa_fe.txt') as sftr_file:
    sftr_reader = csv.reader(sftr_file, delimiter=';')
    santa_fe = np.array(next(sftr_reader)).astype(float)  # np.float is deprecated/removed
santa_fe = santa_fe/santa_fe.max()
X = santa_fe[:-1] #The input
Y = santa_fe[1:] #The output (series shifted one step ahead)
#Possible activation functions
relu = np.vectorize(lambda x: max(0,x))
Sig = np.vectorize(lambda x: np.sign(x)*(1-np.exp(-abs(x/5))))
Ln = np.vectorize(lambda x: np.sign(x)*np.log(1+np.abs(x)))
#Creating the reservoir
sp = 0.8  # spectral radius
R = Reservoir.Reservoir(100,
                        0.05,
                        sp,
                        0.0,
                        0.001,
                        Activation = np.tanh,
                        Feedback = False,
                        inputScaling=2.0
                        )
#Training
R.steps(X[:n]) #Removing the influence of the initial state
epsi = R.fit( Y[n:N+n],
              X[n:N+n],
              Forced=False,
              Show = True
              )
#Validating
R.reset()
R.steps(X[:n]) #Removing the influence of the initial state
Y_ = R.steps(X[n:]) #Prediction
#Showing result
plt.figure()
plt.title("Prediction")
plt.plot(Y_,'g.-')
plt.show()
#Calculating the error of the prediction (normalised RMSE)
Epsi = np.sqrt(((Y_-Y[n:])**2).mean()/Y[n:].std())
plt.figure()
plt.subplot(3,1,1)
plt.title("Results")
plt.plot(Y_,'g.-',label = "Prediction")
plt.plot(Y[n:],'r-', label="Teacher")
plt.legend()
plt.subplot(3,1,3)
plt.title("Closer look")
plt.plot(Y_[-n_show:],'g.-',label = "Prediction")
plt.plot(Y[-n_show:],'r-', label="Teacher")
plt.legend()
plt.show()
print("NRMSE of the training:",epsi,"| NRMSE on the testing",Epsi)
\ No newline at end of file
......@@ -5,29 +5,35 @@ Created on Wed Jan 16 15:20:31 2019
@author: ngocson
"""
import sys
sys.path.append('../../module/')
import numpy as np
import matplotlib.pyplot as plt
from random import *
from Reservoir import *
import Reservoir
'''
import scipy.sparse.linalg as splinalg
from scipy.sparse import csr_matrix
'''
N = 400
n = 200
N = 400 #Total number of points for the signal
n = 100 #Number of points skipped to remove the influence of the initial state
k = np.linspace(1,N,N)/4
S = np.sin(k) #The input
X = S**7 #The output
#Possible activation functions
relu = np.vectorize(lambda x: max(0,x))
Sig = np.vectorize(lambda x: np.sign(x)*(1-np.exp(-abs(x/5))))
Ln = np.vectorize(lambda x: np.sign(x)*np.log(1+np.abs(x)))
#Creating the reservoir
sp = 0.8
R = Reservoir(40,
R = Reservoir.Reservoir(40,
0.05,
sp,
0.0,
......@@ -37,44 +43,39 @@ R = Reservoir(40,
inputScaling=0.5
)
k = np.linspace(1,N,N)/4
S = np.sin(k)
X = S**7
R.steps(S[:n],X[:n])
Train = S
epsi = R.fit( X[:n],
S[:n],
Forced=False
#Training
R.steps(S[:n]) #Removing the influence of the initial state
epsi = R.fit( X[n:],
S[n:],
Forced=False,
Show = True
)
#U = np.sin(k)
R.reset()
R.steps(S[:n])
Y = R.steps(S[n:])
#Validating
R.reset()
R.steps(S[:n]) #Removing the influence of the initial state
Y = R.steps(S[n:]) #Prediction
mafigure = plt.figure()
plt.title("Results")
#plt.plot(k[n:],X[n:])
plt.figure("NRMSE")
plt.plot(k[n:],Y)
Epsi = np.sqrt(((Y-Train[n:])**2).mean()/X[n:].std())
plt.show()
'''
plt.plot(X)
plt.plot(Xsquare)
#Showing result
plt.figure()
plt.title("Prediction")
plt.plot(k[n:],Y,'g.-')
plt.show()
'''
plt.figure("Results")
R.reset()
R.steps(S[:n])
Y_ = R.steps(S[n:])
plt.plot(Y_,'r.-')
plt.plot(X[n:],'g-')
#Calculating the error of the prediction
Epsi = np.sqrt(((Y-X[n:])**2).mean()/X[n:].std())
plt.figure()
plt.plot(Y,'g.-',label = "Prediction")
plt.plot(X[n:],'r-', label="Teacher")
plt.legend()
plt.show()
print(epsi,Epsi)
\ No newline at end of file
print("NRMSE of the training:",epsi,"| NRMSE on the testing",Epsi)
\ No newline at end of file
......@@ -5,78 +5,83 @@ Created on Wed Jan 16 15:20:31 2019
@author: ngocson
"""
import sys
sys.path.append('../../module/')
import numpy as np
import matplotlib.pyplot as plt
from random import *
from Reservoir import *
import Reservoir
'''
import scipy.sparse.linalg as splinalg
from scipy.sparse import csr_matrix
'''
N = 300
n = 100
N = 400 #Total number of points for the signal
n = 200 #Number of points skipped to remove the influence of the initial state
k = np.linspace(1,N,N)/4
S = np.sin(k) #The output
U = np.ones(N) #The input
#Possible activation functions
relu = np.vectorize(lambda x: max(0,x))
Sig = np.vectorize(lambda x: np.sign(x)*(1-np.exp(-abs(x/5))))
Ln = np.vectorize(lambda x: np.sign(x)*np.log(1+np.abs(x)))
#Creating the reservoir
sp = 0.8
R = Reservoir(20,
0.1,
sp,
0.5,
0.001,
sp = 0.9
R = Reservoir.Reservoir(N=100,
p =0.2,
sp = sp,
outputScaling = 0.8,
v = 0.00001,
Activation = np.tanh,
Feedback = True,
inputScaling=0
)
k = np.linspace(1,N,N)/4
U = np.ones(N)
S = np.sin(k)
X = S**7
R.steps(U[:n],X[:n])
Train = S
epsi = R.fit( Train[:n],
U[1:n],
Forced=True
#Training
R.steps(U[:n]) #Removing the influence of the initial state
epsi = R.fit( S[n:N], #We need one more training point to drive the initial feed back of the Reservoir
U[n+1:N],
Forced = True, #Only if the froced teaching method is used. Else S[:n] and U[:n] can be use
Show = True
)
#U = np.sin(k)
R.reset()
R.steps(U[:n])
Y = R.steps(U[n:])
mafigure = plt.figure()
plt.title("Results")
#Validating
R.reset()
R.steps(U[:n]) #Removing the influence of the initial state
Y = R.steps(U[n:]) #Prediction
#plt.plot(k[n:],X[n:])
#plt.plot(k[n:],X[n:])
plt.figure("NRMSE")
plt.plot(k[n:],Y)
Epsi = np.sqrt(((Y-Train[n:])**2).mean()/X[n:].std())
plt.show()
'''
plt.plot(X)
plt.plot(Xsquare)
#Showing result
plt.figure()
plt.title("Prediction")
plt.plot(k[n:],Y,'g.-')
plt.show()
'''
plt.figure("Results")
R.reset()
R.reset()
R.steps(U[:100])
Y_ = R.steps(U[100:200])
plt.plot(Y_)
plt.plot(Train[100:200])
#Calculating the error of the prediction
#Synchronizing the sinus
Y = Y[np.argmax(Y):]
S = S[np.argmax(S):]
m = min(len(S),len(Y))
Y = Y[:m]
S = S[:m]
Epsi = np.sqrt(((Y-S)**2).mean()/S.std())
plt.figure()
plt.plot(Y,'g.-',label = "Prediction")
plt.plot(S,'r-', label="Teacher")
plt.legend()
plt.show()
print(epsi,Epsi)
\ No newline at end of file
print("NRMSE of the training:",epsi,"| NRMSE on the testing",Epsi)
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 28 14:23:42 2019
@author: ngocson
"""
import sys
sys.path.append('../../module/')  # must run BEFORE the project imports below
import csv  # was missing: csv.reader is used below (NameError at runtime)
import numpy as np
import matplotlib.pyplot as plt
from random import *
from NARMA import *
from Reservoir import *

# Tabulated activation function: one ';'-separated CSV row per bias value.
global V
V = []
with open('ActivationFunction.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=';')
    for row in csv_reader:
        L = []
        for value in row:
            L.append(float(value))
        V.append(L)
def approx_lin(x, vr=0.3, a=1.4, s=1.0, table=None):
    """Evaluate the tabulated activation function by linear interpolation.

    Parameters
    ----------
    x : float
        Input value; mapped to a fractional column index 1000*(x+5)*s,
        clamped to [0, 24999].
    vr : float
        Bias voltage; selects the table row int(100*(vr+1)/11), clamped
        to [0, 99].
    a : float
        Output gain applied to the looked-up value.
    s : float
        Input scaling.
    table : list[list[float]] or None
        Lookup table (rows of samples). Defaults to the module-level ``V``
        loaded from ActivationFunction.csv.

    Returns
    -------
    float
        ``a`` times the (interpolated) table value.
    """
    # Row selection. (The original tested fi.is_integer() but both branches
    # did the same thing: f = V[int(fi)].)
    fi = min(max(100 * (vr + 1) / 11, 0.0), 99.0)
    f = (V if table is None else table)[int(fi)]
    # Column selection with clamping; use a fresh name for the integer index
    # instead of shadowing the argument x as the original did.
    fx = min(max(1000.0 * (x + 5) * s, 0.0), 24999.0)
    i = int(fx)
    if fx.is_integer():
        return a * f[i]
    # Linear interpolation between the two neighbouring samples.
    return a * ((i + 1 - fx) * (f[i] - f[i + 1]) + f[i + 1])
# Benchmark: average train/test NRMSE of several reservoirs over several
# NARMA-10 trials.
# NOTE(review): the indentation of the two nested loop bodies below was lost
# in the commit (everything sits at column 0), so this section does not parse
# as-is. The intended structure appears to be: the `for i` body starting at
# `R = Reservoir(...)`, and the `for j` body from the first
# `Narma10Generator.reset(...)` onwards — to be restored against the original
# file.
n_train, n_test, d_train, d_test = 4000,1000,400,402
Nessais = 10  # trials per reservoir
Ntrain = 5  # number of independently drawn reservoirs
sp = 0.8  # spectral radius
Narma10Generator = NARMA()
TrainError = []
TestError = []
for i in range(Ntrain):
R = Reservoir(N = 200,
p = 0.1,
sp = sp,
outputScaling = 1,
v = 0.0001,
Activation = np.vectorize(approx_lin),
Feedback = False,
inputScaling = 5
)
for j in range(Nessais):
# Deterministic generator state for every trial.
Narma10Generator.reset(np.zeros(10),np.zeros(10))
U_train = (np.random.rand(d_train+n_train)*0.5)
U_test = (np.random.rand(d_test+n_test)*0.5)
Y_train = Narma10Generator.getTimeSerie(U_train)
epsi = R.fit(Y = Y_train[d_train:],
U = U_train[d_train:],
Forced = True,
Show = (j)%5==0
)
Narma10Generator.reset(np.zeros(10),np.zeros(10))
R.reset(Zeros=True)
# Skip the first d_test points to wash out the initial reservoir state.
Y_ = R.steps(U_test)[d_test:]
Y_test = Narma10Generator.getTimeSerie(U_test)[d_test:]
NRMSE = np.sqrt(((Y_-Y_test)**2).mean()/np.matrix(Y_test).std())
# Periodic progress print.
if (i*Ntrain+j)%40 ==0:
print(epsi,NRMSE)
TrainError.append(epsi)
TestError.append(NRMSE)
plt.plot(Y_[:100],'r')
plt.plot(Y_test[:100],'g')
plt.show()
import Reservoir
import NARMA
'''
import scipy.sparse.linalg as splinalg
from scipy.sparse import csr_matrix
'''
# Experiment: one-step-ahead prediction of a NARMA-10 series with an
# echo-state reservoir.
# NOTE(review): stray lines left over from a superseded version of this script
# (plots of an undefined `Y_`, prints of undefined `TrainError`/`TestError`)
# were removed; they raised NameError.
n_train, n_test = 4000,1000 #Total number of points for the signal
d_train, d_test = 400,402 #Number of points skipped to remove the influence of the initial state
n_show = 200
Narma10Generator = NARMA.NARMA()
#The inputs
U_train = (np.random.rand(d_train+n_train)*0.5)
U_test = (np.random.rand(d_test+n_test)*0.5)
#The outputs (generator reset to a deterministic state between series)
Y_train = Narma10Generator.getTimeSerie(U_train)
Narma10Generator.reset(np.zeros(10),np.zeros(10))
Y_test = Narma10Generator.getTimeSerie(U_test)
Narma10Generator.reset(np.zeros(10),np.zeros(10))
#Possible activation functions
relu = np.vectorize(lambda x: max(0,x))
Sig = np.vectorize(lambda x: np.sign(x)*(1-np.exp(-abs(x/5))))
Ln = np.vectorize(lambda x: np.sign(x)*np.log(1+np.abs(x)))
#Creating the reservoir
sp = 0.8  # spectral radius
R = Reservoir.Reservoir(N = 200,
                        p = 0.1,
                        sp = sp,
                        outputScaling = 1,
                        v = 0.001,
                        Activation = np.tanh,
                        Feedback = False,
                        inputScaling=5
                        )
#Training
R.steps(U_train[:d_train]) #Removing the influence of the initial state
epsi = R.fit( Y_train[d_train:],
              U_train[d_train:],
              Forced=False,
              Show = True
              )
#Validating
R.reset()
R.steps(U_test[:d_test]) #Removing the influence of the initial state
Y = R.steps(U_test[d_test:]) #Prediction
#Calculating the error of the prediction (normalised RMSE)
Epsi = np.sqrt(((Y-Y_test[d_test:])**2).mean()/Y_test[d_test:].std())
#Showing result
plt.figure()
plt.plot(Y,'g.-',label = "Prediction")
plt.plot(Y_test[d_test:],'r-', label="Teacher")
plt.legend()
plt.show()
plt.figure()
plt.title("closer look")
plt.plot(Y[-n_show:],'g.-',label = "Prediction")
plt.plot(Y_test[-n_show:],'r-', label="Teacher")
plt.legend()
plt.show()
print("NRMSE of the training:",epsi,"| NRMSE on the testing",Epsi)
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 16:06:52 2019
@author: Ngocson
"""
import Reservoir
import numpy as np
import matplotlib.pyplot as plt
import csv

# Tabulated activation function: one ';'-separated CSV row per bias value.
global V
V = []
with open('ActivationFunction.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=';')
    for row in csv_reader:
        L = []
        for value in row:
            L.append(float(value))
        V.append(L)
# Santa Fe laser series (one ';'-separated row), normalised by its maximum.
with open('santa_fe.txt') as sftr_file:
    sftr_reader = csv.reader(sftr_file, delimiter=';')
    santa_fe = np.array(next(sftr_reader)).astype(float)  # np.float is deprecated/removed
santa_fe = santa_fe/santa_fe.max()
X = santa_fe[:-1]  # input: the series itself
Y = santa_fe[1:]   # target: the series shifted one step ahead
def approx_lin(x, vr=0.18, a=1.0, s=1.0, table=None):
    """Tabulated activation function evaluated by linear interpolation.

    ``x`` maps to fractional column index 1000*(x+5)*s clamped to
    [0, 24999]; ``vr`` selects row int(100*(vr+1)/11) clamped to [0, 99];
    ``a`` is an output gain and ``s`` an input scaling. ``table`` lets a
    caller supply the lookup table explicitly; by default the module-level
    ``V`` (loaded from ActivationFunction.csv) is used. Returns a float.
    """
    # Row selection — the original's fi.is_integer() branches were identical.
    fi = min(max(100 * (vr + 1) / 11, 0.0), 99.0)
    f = (V if table is None else table)[int(fi)]
    fx = min(max(1000.0 * (x + 5) * s, 0.0), 24999.0)
    # Use a dedicated index variable instead of shadowing the argument x.
    i = int(fx)
    if fx.is_integer():
        return a * f[i]
    # Linear interpolation between the two neighbouring samples.
    return a * ((i + 1 - fx) * (f[i] - f[i + 1]) + f[i + 1])
#Using the activation fonction of our node
# Compare a tanh reservoir against one using the measured NMOS activation
# (approx_lin) on one-step-ahead prediction of the Santa Fe series X -> Y.
nmos_training_error = []
nmos_testing_error = []
tanh_training_error = []
tanh_testing_error = []
Vr = np.linspace(0.1,2.5,30)  # candidate bias values (used only by the commented-out sweep below)
Ns = range(1000,6000,100)  # candidate training sizes (idem)
N = 450  # reservoir size
n_train = 4000  # number of points used for fitting
n_init = 2*N  # points skipped to wash out the initial reservoir state
# Reference reservoir with tanh activation.
R_tanh = Reservoir.Reservoir(N = N,
p = 0.05,
sp = 0.9,
outputScaling = 1,
v = 0.0001,
Activation = np.tanh,
Feedback = False,
inputScaling = 10,
damping = False
)
R_tanh.steps(X[:n_init])
NRMSE_tanh = R_tanh.fit(Y[n_init:n_train],X[n_init:n_train])
R_tanh.reset()
Ytanh_ = R_tanh.steps(X)  # free run over the whole series
tanh_training_error.append(NRMSE_tanh)
tanh_testing_error.append(np.sqrt(np.linalg.norm(Ytanh_[n_init:]-Y[n_init:])/Ytanh_[n_init:].std()))
vr =0.3  # bias voltage for the NMOS activation
# Same reservoir hyper-parameters, NMOS activation at bias vr.
R_nmos = Reservoir.Reservoir(N = N,
p = 0.05,
sp = 0.9,
outputScaling = 1,
v = 0.0001,
Activation = np.vectorize(lambda x: approx_lin(x,vr)),
Feedback = False,
inputScaling = 10,
damping = False,
Verbose = False
)
R_nmos.steps(X[:n_init])
NRMSE_nmos = R_nmos.fit(Y[n_init:n_train],X[n_init:n_train])
R_nmos.reset()
Ynmos_ = R_nmos.steps(X)  # free run over the whole series
# NOTE(review): the "db" figures below use np.log (natural log); decibels are
# conventionally 20*log10(x) — confirm which was intended.
print(" ",n_train,": Nmos NRMSE:",NRMSE_nmos," / ",20*np.log(NRMSE_nmos),"db ; ","Tanh NRMSE:",NRMSE_tanh," / ",20*np.log(NRMSE_tanh),"db ; ")
nmos_training_error.append(NRMSE_nmos)
nmos_testing_error.append(np.sqrt(np.linalg.norm(Ynmos_[n_init:]-Y[n_init:])/Ynmos_[n_init:].std()))
# Overlay the last 200 points of teacher and both reservoir outputs.
plt.figure()
plt.plot(Y[-200:],'r-.',label="Teacher")
plt.plot(Ytanh_[-200:],'b-',label="Tanh reservoir output")
plt.plot(Ynmos_[-200:],'g-',label="Nmos reservoir output")
plt.legend()
plt.show()
'''
plt.figure()
plt.title('Number of points used for fitting')
plt.plot(Ns,nmos_training_error,'r-',label='nmos training error')
plt.plot(Ns,nmos_testing_error,'b-',label='nmos testing error')
plt.plot(Ns,tanh_training_error,'y-',label='tanh training error')
plt.plot(Ns,tanh_testing_error,'g-',label='tanh testing error')
plt.legend()
plt.show()
plt.figure()
plt.plot(n_train,[Vr[np.argmax(i)] for i in nmos_testing_error])
plt.show()
'''
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 14:46:13 2019