Normalized spectrograms #36

Open · wants to merge 9 commits into master
75 changes: 75 additions & 0 deletions data_analysis/cnn_rnnAproach.py
@@ -0,0 +1,75 @@
from spectrogram import full_bpm_to_data, HEART_AV_ROOT, NormalizedSpectrograms, NormalizedSubjectSplitSpectrograms
from get_heartrates import get_interesting_heartrates
from keras.callbacks import EarlyStopping
from kbhit import KBHit

import numpy as np
import code
import random
import learnLib

kb = KBHit()
#(X_train, y_train), (X_test, y_test) = full_bpm_to_data(get_interesting_heartrates(HEART_AV_ROOT))

ns = NormalizedSubjectSplitSpectrograms(subjectIdependant=False)  # alternative: NormalizedSpectrograms()

X_train, Y_train = ns.getTrainData()
X_val, Y_val = ns.getValidationData()

# sanity-check the input and target shapes
print(X_train.shape)
#Y_train = np.repeat(np.reshape(-1,1), X_train.shape[1], axis=1)
print(Y_train.shape)

print("Model (nb_filters1, nb_col1, nb_filters2, nb_col2, ltsm_neurons, drop1, drops2)")

prevLoss = float("inf")  # lowest validation RMSE seen so far
maxModel = None
stop = False
models = {}

for args in learnLib.RandomCnnRnnParameters(): #itertools.product(nb_hiddens, drop1s):
print("Model: ", args)
model = learnLib.get_CNN_RNN_model(X_train[0].shape, *args)
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
history = model.fit(X_train, Y_train, batch_size=50, nb_epoch=30,
verbose=1, validation_data=(X_val,Y_val), callbacks=[early_stopping])


# most recent training loss: history.history["loss"][-1]
r, rmse, _ = learnLib.assess_model(model, X_val, Y_val)
models[args] = r,rmse
print("Model r: ", r)
print("Model rmse: ", rmse)
if rmse < prevLoss:
prevLoss = rmse
maxModel = model
while kb.kbhit():
try:
if "q" in kb.getch():
print("quiting due to user pressing q")
stop = True
except UnicodeDecodeError:
pass

if stop:
break

del X_train

X_test, Y_test = ns.getTestData()

learnLib.printModels(models)

r, rmse, preds = learnLib.assess_model(maxModel, X_test, Y_test)
predicted_bpm = np.array(list(map(ns.unnormalize_bpm, preds)))
test_bpm = np.array(list(map(ns.unnormalize_bpm, Y_test)))

val_bpm = np.array(list(map(ns.unnormalize_bpm, Y_val)))
_, _, preds = learnLib.assess_model(maxModel, X_val, Y_val)
val_predicted_bpm = np.array(list(map(ns.unnormalize_bpm, preds)))


print("Model r: ", r)
print("Model rmse: ", rmse)
code.interact(local=locals())
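
The script above reports r and RMSE on the normalised targets. A minimal follow-up sketch (not part of the diff) for recomputing the same metrics on the BPM scale, using the predicted_bpm and test_bpm arrays built just before the interactive prompt; the ravel calls only guard against a trailing (n, 1) shape from the Dense(1) head:

# hedged sketch: same metrics as learnLib.assess_model, but on un-normalised BPM
from math import sqrt
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error

bpm_r, _ = pearsonr(predicted_bpm.ravel(), test_bpm.ravel())
bpm_rmse = sqrt(mean_squared_error(test_bpm.ravel(), predicted_bpm.ravel()))
print("BPM-scale r: ", bpm_r)
print("BPM-scale rmse: ", bpm_rmse)
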
109 changes: 96 additions & 13 deletions data_analysis/learnLib.py
@@ -1,15 +1,17 @@
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout, Flatten, TimeDistributedDense, Reshape, Permute
from keras.layers.convolutional import Convolution2D, MaxPooling2D, Convolution1D
from keras.layers.recurrent import LSTM, GRU, SimpleDeepRNN
from keras.layers.noise import GaussianNoise
from keras.regularizers import l2
from keras.callbacks import EarlyStopping
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error
from math import sqrt

import random
import numpy as np
import code

class RandomMlpParameters():
def __init__(self):
@@ -22,7 +24,7 @@ def __next__(self):
self.__cnt += 1
if self.__cnt > 100:
raise StopIteration
return random.randrange(100,1000,100), random.uniform(0.2,0.8)
return random.randrange(10,1000,50), random.uniform(0.2,0.8)

def printModels(models):
for key, value in models.items():
@@ -43,12 +45,20 @@ def get_2_layer_MLP_model(in_shape, nb_hidden=50, drop1=0.1):
model.compile(loss='mse', optimizer='adam')
return model

def assess_2dmodel(model, X_test, Y_test):
predictions = model.predict(X_test)
r = pearsonr(np.mean(predictions[:,:,0],axis=1), np.mean(Y_test[:,:,0],axis=1))
rmse = sqrt(mean_squared_error(predictions[:,:,0], Y_test[:,:,0]))
return r, rmse, predictions

def assess_model(model, X_test, Y_test):
predictions = model.predict(X_test)
r = pearsonr(predictions[:,0], Y_test)
rmse = sqrt(mean_squared_error(predictions, Y_test))
return r, rmse, predictions
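
For reference, a minimal usage sketch of assess_model (illustrative only; the toy stand-in model below is hypothetical, and its (n, 1) prediction shape matches the Dense(1) output head used by the models in this file):

import numpy as np

class _ToyModel:
    # stands in for a compiled Keras model: predict() returns an (n, 1) array
    def predict(self, X):
        return X.mean(axis=tuple(range(1, X.ndim))).reshape(-1, 1)

X_demo = np.random.rand(8, 30, 1000)   # (samples, timesteps, features)
Y_demo = np.random.rand(8)             # 1-D normalised BPM targets

r, rmse, preds = assess_model(_ToyModel(), X_demo, Y_demo)
print(r, rmse, preds.shape)            # r is pearsonr's (correlation, p-value) pair
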



def shuffle_in_unison(a, b):
rng_state = np.random.get_state()
np.random.shuffle(a)
@@ -66,25 +76,34 @@ def __next__(self):
self.__cnt += 1
if self.__cnt > 100:
raise StopIteration
return random.randrange(100,600,100), \
random.randrange(50,200,50), \
random.uniform(0.4,0.7), \
random.uniform(0.4,0.7)
return random.randrange(300,600,50), \
random.randrange(100,400,20), \
random.randrange(200,300,20), \
random.uniform(0.3,0.7), \
random.uniform(0.3,0.7)


def get_RNN_model(in_shape, ltsm_out_dim = 256,nb_hidden=100, drop1=0.5, drop2=0.5):
def get_RNN_model(in_shape,td_num=512, ltsm_out_dim = 256,nb_hidden=100, drop1=0.5, drop2=0.5):
model = Sequential()

model.add(GaussianNoise(0.05, input_shape=in_shape))
model.add(LSTM(ltsm_out_dim, input_shape=in_shape, return_sequences=True))
#model.add(Activation('relu'))
model.add(LSTM(ltsm_out_dim, return_sequences=True))
reg = l2(0.05)
#model.add(TimeDistributedDense(td_num, W_regularizer=l2(0.03)))
#reg.set_param(model.layers[3].get_params()[0][0])
#model.layers[3].regularizers = [reg]
model.add(Dropout(drop1))

model.add(LSTM(ltsm_out_dim // 2))
model.add(LSTM(ltsm_out_dim))
# reg = l2(0.05)
# reg.set_param(model.layers[3].get_params()[0][0])
# model.layers[3].regularizers = [reg]
model.add(Dropout(drop1))
# model.regularizers = [l2(0.05)]
#model.add(Activation('relu'))

model.add(Flatten())
model.add(Dense(nb_hidden))
model.add(Dense(nb_hidden, W_regularizer=l2(0.05)))
model.add(Activation('relu'))
model.add(Dropout(drop2))

@@ -93,3 +112,67 @@ def get_RNN_model(in_shape, ltsm_out_dim = 256,nb_hidden=100, drop1=0.5, drop2=0.5):

model.compile(loss='mse', optimizer='rmsprop')
return model


def plotR(predicted_bpm, test_bpm):
import matplotlib.pyplot as plt
n = predicted_bpm.shape[0]
xs = range(0,n)
plt.plot(xs, predicted_bpm, 'r--', xs, test_bpm, 'bs')
plt.show()

class RandomCnnRnnParameters():
def __init__(self):
self.__cnt = 0

def __iter__(self):
return self

def __next__(self):
self.__cnt += 1
if self.__cnt > 100:
raise StopIteration
return random.randrange(16,32,2), \
random.randrange(3,15,2), \
random.randrange(32,256,6), \
random.randrange(5,15,2), \
random.randrange(50,300,30), \
random.uniform(0.3,0.7), \
random.uniform(0.3,0.7)

def get_CNN_RNN_model(in_shape, nb_filters1 = 32,nb_col1=5,
nb_filters2 = 64,nb_col2=10,
ltsm_out_dim = 256, drop1=0.5, drop2=0.5):
model = Sequential()

# shape (n_images, frequencies, time)
# shape (1,200,158)
model.add(GaussianNoise(0.01, input_shape=in_shape))
#Convolution2D(nb_filter, nb_row, nb_col)

model.add(Convolution2D(nb_filters1,in_shape[1],nb_col1))
model.add(Activation('relu'))
model.add(MaxPooling2D((1,2)))
model.add(Dropout(drop2))

# shape (16,1,77)
model.add(Convolution2D(nb_filters2,1,nb_col2))
model.add(Activation('relu'))
model.add(MaxPooling2D((1,2)))
model.add(Dropout(drop2))


# shape (32,1,68)
shape = model.layers[-1].input_shape
model.add(Reshape(dims=(shape[1],shape[3])))
#shape (32,68)
model.add(Permute((2,1)))
model.add(Dropout(drop1))
model.add(LSTM(ltsm_out_dim))
model.add(Dropout(drop1))

model.add(Dense(1))
model.add(Activation('linear'))

model.compile(loss='mse', optimizer='adam')
return model
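
The shape comments inside get_CNN_RNN_model (e.g. the (1, 200, 158) spectrogram input) assume Theano-style channels-first ordering and 'valid' border mode, the Keras defaults at the time. A small stand-alone sketch of the same bookkeeping, handy for checking that a given (frequencies, time) spectrogram still leaves enough timesteps for the LSTM after the two conv/pool stages:

def cnn_rnn_timesteps(freqs, time, nb_col1=5, nb_col2=10, pool=2):
    # Convolution2D(nb_filters1, freqs, nb_col1), 'valid': the frequency axis collapses to 1
    t = time - nb_col1 + 1
    t //= pool                       # MaxPooling2D((1, 2))
    # Convolution2D(nb_filters2, 1, nb_col2), 'valid'
    t = t - nb_col2 + 1
    t //= pool                       # MaxPooling2D((1, 2))
    return t                         # timesteps fed to the LSTM after Reshape/Permute

print(cnn_rnn_timesteps(200, 158))   # -> 34 timesteps for the (1, 200, 158) example
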
7 changes: 4 additions & 3 deletions data_analysis/mlpAproach.py
@@ -1,4 +1,4 @@
from spectrogram import full_bpm_to_data, HEART_AV_ROOT, NormalizedSpectrograms
from spectrogram import full_bpm_to_data, HEART_AV_ROOT, NormalizedSpectrograms, NormalizedSubjectSplitSpectrograms
from get_heartrates import get_interesting_heartrates
from keras.callbacks import EarlyStopping
from kbhit import KBHit
@@ -11,9 +11,10 @@
kb = KBHit()
#(X_train, y_train), (X_test, y_test) = full_bpm_to_data(get_interesting_heartrates(HEART_AV_ROOT))

ns = NormalizedSpectrograms()
ns = NormalizedSubjectSplitSpectrograms()

(X_train, Y_train) , valTuple = ns.getTrainAndValidationData()
(X_train, Y_train) = ns.getTrainData()
valTuple = ns.getValidationData()

print(X_train.shape)

1 change: 1 addition & 0 deletions data_analysis/my_model_architecture65%.json
@@ -0,0 +1 @@
{"name": "Sequential", "layers": [{"name": "GaussianNoise", "input_shape": [30, 1000], "sigma": 0.05}, {"input_dim": null, "name": "LSTM", "activation": "tanh", "forget_bias_init": "one", "inner_activation": "hard_sigmoid", "return_sequences": true, "input_length": null, "init": "glorot_uniform", "truncate_gradient": -1, "output_dim": 350, "input_shape": [30, 1000], "inner_init": "orthogonal"}, {"name": "Dropout", "p": 0.6590742443657821}, {"input_dim": null, "name": "LSTM", "init": "glorot_uniform", "activation": "tanh", "truncate_gradient": -1, "output_dim": 350, "forget_bias_init": "one", "inner_activation": "hard_sigmoid", "inner_init": "orthogonal", "return_sequences": false, "input_length": null}, {"name": "Flatten"}, {"activity_regularizer": null, "name": "Dense", "init": "glorot_uniform", "W_regularizer": null, "output_dim": 200, "b_regularizer": null, "input_dim": null, "b_constraint": null, "activation": "linear", "W_constraint": null}, {"target": 0, "name": "Activation", "activation": "relu", "beta": 0.1}, {"name": "Dropout", "p": 0.4064783719962251}, {"activity_regularizer": null, "name": "Dense", "init": "glorot_uniform", "W_regularizer": null, "output_dim": 1, "b_regularizer": null, "input_dim": null, "b_constraint": null, "activation": "linear", "W_constraint": null}, {"target": 0, "name": "Activation", "activation": "linear", "beta": 0.1}], "optimizer": {"name": "RMSprop", "rho": 0.8999999761581421, "epsilon": 1e-06, "lr": 0.0010000000474974513}, "loss": "mean_squared_error", "class_mode": "categorical", "theano_mode": null}
Binary file added data_analysis/my_model_weights65%.h5
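
The two checkpoint files above look like a Keras to_json() architecture dump plus a save_weights() HDF5 file. If so, the "65%" model can be restored roughly as follows (paths assumed relative to the repository root):

from keras.models import model_from_json

with open("data_analysis/my_model_architecture65%.json") as f:
    model = model_from_json(f.read())
model.load_weights("data_analysis/my_model_weights65%.h5")
model.compile(loss="mse", optimizer="rmsprop")  # matches the loss/optimizer recorded in the JSON
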
18 changes: 13 additions & 5 deletions data_analysis/rnnAproach.py
@@ -1,4 +1,4 @@
from spectrogram import full_bpm_to_data, HEART_AV_ROOT, NormalizedSpectrograms
from spectrogram import full_bpm_to_data, HEART_AV_ROOT, NormalizedSpectrograms, NormalizedSubjectSplitSpectrograms
from get_heartrates import get_interesting_heartrates
from keras.callbacks import EarlyStopping
from kbhit import KBHit
@@ -11,7 +11,7 @@
kb = KBHit()
#(X_train, y_train), (X_test, y_test) = full_bpm_to_data(get_interesting_heartrates(HEART_AV_ROOT))

ns = NormalizedSpectrograms()
ns = NormalizedSubjectSplitSpectrograms(subjectIdependant=True)  # alternative: NormalizedSpectrograms()

def sliceToTimeSeries(X):
divisibleTime = X[:,0,:,:150]
@@ -21,7 +21,8 @@ def sliceToTimeSeries(X):
return flattenLastTwo


(X_train, Y_train) , (X_val, Y_val) = ns.getTrainAndValidationData()
X_train, Y_train = ns.getTrainData()
X_val, Y_val = ns.getValidationData()

#slice the spectrogram
X_train = sliceToTimeSeries(X_train)
@@ -37,12 +38,13 @@ def sliceToTimeSeries(X):
stop = False
models = {}
X_val = sliceToTimeSeries(X_val)
print(X_val.shape)

for args in learnLib.RandomRnnParameters(): #itertools.product(nb_hiddens, drop1s):
print("Model: ", args)
model = learnLib.get_RNN_model(X_train[0].shape, *args)
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
history = model.fit(X_train, Y_train, batch_size=100, nb_epoch=20,
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
history = model.fit(X_train, Y_train, batch_size=30, nb_epoch=15,
verbose=1, validation_data=(X_val,Y_val), callbacks=[early_stopping])


@@ -74,6 +76,12 @@ def sliceToTimeSeries(X):

r, rmse, preds = learnLib.assess_model(maxModel, X_test, Y_test)
predicted_bpm = np.array(list(map(ns.unnormalize_bpm, preds)))
test_bpm = np.array(list(map(ns.unnormalize_bpm, Y_test)))

val_bpm = np.array(list(map(ns.unnormalize_bpm, Y_val)))
_, _, preds = learnLib.assess_model(maxModel, X_val, Y_val)
val_predicted_bpm = np.array(list(map(ns.unnormalize_bpm, preds)))

print("Model r: ", r)
print("Model rmse: ", rmse)
code.interact(local=locals())
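
From the code.interact prompt, the plotR helper added to learnLib in this PR gives a quick visual check of the fit (assuming a matplotlib display is available):

learnLib.plotR(predicted_bpm, test_bpm)      # test set: predictions (red dashes) vs. ground truth (blue squares)
learnLib.plotR(val_predicted_bpm, val_bpm)   # same comparison on the validation set
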
Binary file added data_analysis/spec.pickle