TypeError: object of type 'NoneType' has no len()
First time trying Hyperas. I get the error `object of type 'NoneType' has no len()`, reported at the `trials=Trials()` line of the `optim.minimize(...)` call. I am not sure what this means. I am trying to optimize a network that takes three inputs in the form of a list `X_train`, so `X_train[0]` holds one set of input data, and `X_train[1]` and `X_train[2]` hold the others. Could that be the cause of this error? I don't know what Hyperas can handle.
Can confirm this is a problem. It happens both on Linux and Mac. Here is my source code:
`from random import randint from math import ceil, log10 from numpy import argmax from keras.models import Sequential from keras.layers import Dense, LSTM, TimeDistributed, RepeatVector from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder import numpy as np from hyperopt import Trials, STATUS_OK, tpe from hyperas import optim from hyperas.distributions import choice, uniform
def createSingleInput(max_num, num_of_input):
    """Build one random addition example as character sequences.

    Draws `num_of_input` random integers in [1, max_num] and returns:
      x        -- the input expression, one character per list entry,
                  e.g. ['1', '2', '+', '7'] for "12+7"
      y        -- the sum's digits framed by start/end markers:
                  ['S', d0, ..., dn, 'E']
      y_target -- the sum's digits without the markers: [d0, ..., dn]
    """
    terms = [randint(1, max_num) for _ in range(num_of_input)]
    total = sum(terms)

    # "a+b+c" as a flat list of single characters.
    x = list('+'.join(str(t) for t in terms))

    digits = list(str(total))
    y = ['S'] + digits + ['E']
    y_target = digits[:]
    return x, y, y_target
#x, y=createSingleInput(20, 2)
def singleTermLengthIfChars(max_num):
    """Return how many decimal digits the largest term (max_num) occupies."""
    return ceil(log10(max_num + 1))
def max_length_of_a_series(max_num, num_of_input):
    """Max character length of an input expression like "12+7+20".

    num_of_input terms of up to `digits_per_term` characters each, joined
    by (num_of_input - 1) plus signs.
    """
    digits_per_term = ceil(log10(max_num + 1))  # == singleTermLengthIfChars(max_num)
    return int(digits_per_term * num_of_input + num_of_input - 1)
def max_length_of_a_y_targeted(max_num, num_of_input):
    """Max digit count of the sum (upper-bounded by num_of_input * (max_num + 1))."""
    largest_sum_bound = (max_num + 1) * num_of_input
    return int(ceil(log10(largest_sum_bound)))
def max_length_of_a_y(max_num, num_of_input):
    """Max length of the framed target: sum digits plus the 'S' and 'E' markers."""
    sum_digits = int(ceil(log10(num_of_input * (max_num + 1))))
    return sum_digits + 2  # +2 for the 'S'/'E' frame characters
def onehot_encode(max_length, data):
    """One-hot encode a 1-D integer label vector into a (max_length, 14) matrix.

    Row i gets a 1 at column data[i]; rows beyond len(data) stay all-zero,
    which acts as right-padding up to max_length.
    """
    encoded = np.zeros((max_length, 14))
    n = data.shape[0]
    encoded[np.arange(n), data[:n]] = 1
    return encoded
def label_encode(data):
    """Map a sequence of tokens ('0'-'9', '+', 'S', 'E') to integer labels.

    Digits map to their own value, '+' -> 10, 'E' -> 12, 'S' -> 13
    (11 is unused). Returns a 1-D numpy int array.
    """
    vocab = {str(d): d for d in range(10)}
    vocab.update({'+': 10, 'E': 12, 'S': 13})
    return np.array([vocab[token] for token in data])
def createASeriesInput(max_num, num_of_input, size):
    """Generate `size` encoded training examples.

    Each example is one random addition produced by createSingleInput,
    label-encoded and then one-hot encoded (padded to the per-stream
    maximum lengths).

    Returns three numpy arrays:
      X          -- (size, max_series_len, 14) one-hot input expressions
      Y          -- (size, max_y_len, 14) one-hot framed targets ('S'...'E')
      Y_targeted -- (size, max_target_len, 14) one-hot unframed targets

    FIX: the original fitted a sklearn LabelEncoder here but never used it
    (label_encode() does all the mapping); that dead code is removed.
    """
    max_length = max_length_of_a_series(max_num, num_of_input)
    max_length_y = max_length_of_a_y(max_num, num_of_input)
    max_length_y_targeted = max_length_of_a_y_targeted(max_num, num_of_input)

    X = []
    Y = []
    Y_targeted = []
    for _ in range(size):
        x, y, y_target = createSingleInput(max_num, num_of_input)
        X.append(onehot_encode(max_length, label_encode(x)))
        Y.append(onehot_encode(max_length_y, label_encode(y)))
        Y_targeted.append(onehot_encode(max_length_y_targeted, label_encode(y_target)))

    return np.array(X), np.array(Y), np.array(Y_targeted)
def produceData():
    """Prepare the training/eval split for hyperas (90% train / 10% eval).

    Note the Y stream is discarded; Y_target (the unframed digits) is what
    the model is trained to predict.

    FIX: hyperas reads this function's SOURCE TEXT with inspect.getsource and
    measures its indentation (determine_indent). A def whose whole body sits
    on the def line has no indented lines, so determine_indent returns None
    and hyperas crashes with "object of type 'NoneType' has no len()" —
    keep this function conventionally indented.
    """
    portion = 0.9
    X, Y, Y_target = createASeriesInput(max_num=20, num_of_input=3, size=100000)
    train_limit = int(ceil(X.shape[0] * portion))
    X_train = X[:train_limit]
    X_eval = X[train_limit:]
    Y_train = Y_target[:train_limit]
    Y_eval = Y_target[train_limit:]
    return X_train, Y_train, X_eval, Y_eval
def produceModel(X_train, Y_train, X_eval, Y_eval):
    """Hyperas model function: build/train a seq2seq LSTM, return loss dict.

    The {{choice([...])}} markers are hyperas template syntax, expanded into a
    hyperopt search space before execution. Returns the dict hyperopt expects:
    {'loss': -accuracy, 'status': STATUS_OK, 'model': model}.

    FIXES vs. original:
      * `keras` itself was never imported (only keras.models / keras.layers),
        so keras.optimizers.* raised NameError — import it locally.
      * validation_data used undefined names (X_val, Y_val); the parameters
        are X_eval / Y_eval.
      * the local variable `optim` shadowed the imported hyperas `optim`
        module — renamed to `opt`.
    """
    from keras import optimizers  # local import: `keras` not imported at top level

    adam = optimizers.Adam(lr={{choice([10**-3, 10**-2, 10**-1])}})
    rmsprop = optimizers.RMSprop(lr={{choice([10**-3, 10**-2, 10**-1])}})
    sgd = optimizers.SGD(lr={{choice([10**-3, 10**-2, 10**-1])}})

    choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}
    if choiceval == 'adam':
        opt = adam
    elif choiceval == 'rmsprop':
        opt = rmsprop
    else:
        opt = sgd

    model = Sequential()
    model.add(LSTM({{choice([75, 128, 256])}}, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(RepeatVector(Y_train.shape[1]))
    model.add(LSTM({{choice([75, 128, 256])}}, return_sequences=True))
    model.add(TimeDistributed(Dense(X_train.shape[2], activation='softmax')))
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    model.summary()

    model.fit(X_train, Y_train,
              epochs={{choice([2, 5, 10, 15])}},
              batch_size={{choice([8, 16, 32, 64])}},
              validation_data=(X_eval, Y_eval))  # FIX: was (X_val, Y_val) — NameError

    score, acc = model.evaluate(X_eval, Y_eval, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
best_run, best_model = optim.minimize(model=produceModel, data=produceData, algo=tpe.suggest, max_evals=30, trials=Trials(), notebook_name='hyperas_trial') `
and here is the stacktrace: `>>> Imports: #coding=utf-8
try: from random import randint except: pass
try: from math import ceil, log10 except: pass
try: from numpy import argmax except: pass
try: from keras.models import Sequential except: pass
try: from keras.layers import Dense, LSTM, TimeDistributed, RepeatVector except: pass
try: from sklearn.preprocessing import LabelEncoder except: pass
try: from sklearn.preprocessing import OneHotEncoder except: pass
try: import numpy as np except: pass
try: from hyperopt import Trials, STATUS_OK, tpe except: pass
try: from hyperas import optim except: pass
try: from hyperas.distributions import choice, uniform except: pass
Hyperas search space:
def get_space(): return { 'lr': hp.choice('lr', [10**-3, 10**-2, 10**-1]), 'lr_1': hp.choice('lr_1', [10**-3, 10**-2, 10**-1]), 'lr_2': hp.choice('lr_2', [10**-3, 10**-2, 10**-1]), 'choiceval': hp.choice('choiceval', ['adam', 'sgd', 'rmsprop']), 'LSTM': hp.choice('LSTM', [75, 128, 256]), 'LSTM_1': hp.choice('LSTM_1', [75, 128, 256]), 'epochs': hp.choice('epochs', [2, 5, 10, 15]), 'batch_size': hp.choice('batch_size', [8, 16, 32, 64]), }
TypeError Traceback (most recent call last)
/usr/local/lib/python3.5/dist-packages/hyperas/optim.py in minimize(model, data, algo, max_evals, trials, functions, rseed, notebook_name, verbose, eval_space, return_space) 65 full_model_string=None, 66 notebook_name=notebook_name, ---> 67 verbose=verbose) 68 69 best_model = None
/usr/local/lib/python3.5/dist-packages/hyperas/optim.py in base_minimizer(model, data, functions, algo, max_evals, trials, rseed, full_model_string, notebook_name, verbose, stack) 94 model_str = full_model_string 95 else: ---> 96 model_str = get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack) 97 temp_file = './temp_model.py' 98 write_temp_files(model_str, temp_file)
/usr/local/lib/python3.5/dist-packages/hyperas/optim.py in get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack) 189 190 functions_string = retrieve_function_string(functions, verbose) --> 191 data_string = retrieve_data_string(data, verbose) 192 model = hyperopt_keras_model(model_string, parts, aug_parts, verbose) 193
/usr/local/lib/python3.5/dist-packages/hyperas/optim.py in retrieve_data_string(data, verbose) 212 data_string = inspect.getsource(data) 213 first_line = data_string.split("\n")[0] --> 214 indent_length = len(determine_indent(data_string)) 215 data_string = data_string.replace(first_line, "") 216 r = re.compile(r'^\sreturn.')
TypeError: object of type 'NoneType' has no len()`
@aojue1109 , @ichenjia , have you found a solution? I am getting the same error. The odd part is that it executed successfully with the same dataset earlier, and the error only appeared later.
Have not solved the problem. I switched to Talos instead. Here is a blog I wrote about talos https://medium.com/jia-chen/multi-input-seq2seq-generation-with-keras-and-talos-84d8bdec2d46
@ichenjia Great, thanks for your quick reply. I am wondering whether Talos also supports hyperas-style parallel search distributed via MongoDB.
https://github.com/maxpumperla/hyperas/issues/57#issuecomment-571884491