1
votes
# Sanity-check the dataset shapes before training:
# 158 validation samples and 3012 training samples; inputs are
# single-channel 128x256 images, labels are one-hot over 24 classes.
print(X_val_train.shape)    #(158, 128, 256, 1)

print(Y_val_train.shape)    #(158, 24)

print(X_train.shape)        # (3012, 128, 256, 1)
print(Y_train.shape)        #(3012, 24)

import keras
from keras.utils.np_utils import to_categorical
import keras_resnet.models

# Single-channel 128x256 input images, 24 output classes (one-hot labels).
shape, classes = (128, 256, 1), 24
x = keras.layers.Input(shape)
model = keras_resnet.models.ResNet50(x, classes=classes)
model.compile("adam", "categorical_crossentropy", ["accuracy"])

# 3012 training samples are not divisible by batch_size=32 (3012/32 = 94.125),
# so fit() attempts a partial 95th step and crashes with
# "IndexError: list index out of range" once validation data is added.
# Pin steps_per_epoch/validation_steps to whole batches so no partial
# step is ever attempted.
model.fit(
    X_train,
    Y_train,
    batch_size=32,
    epochs=10,
    steps_per_epoch=X_train.shape[0] // 32,
    validation_data=(X_val_train, Y_val_train),
    validation_steps=max(1, X_val_train.shape[0] // 32),
)
Epoch 1/10
94/95 [============================>.] - ETA: 0s - loss: 3.3494 - accuracy: 0.2193
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-47-21de3679f6a9> in <module>
     11 #training_y = to_categorical(training_y)
     12 
---> 13 model.fit(X_train, Y_train,batch_size=32,epochs=10,validation_data=(X_val_train,Y_val_train))
     14 # = model.fit(X, Y, validation_split=0.33, epochs=150, batch_size=10, verbose=0)
     15 # list all data in history

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1115           # Create data_handler for evaluation and cache it.
   1116           if getattr(self, '_eval_data_handler', None) is None:
-> 1117             self._fit_frame = tf_inspect.currentframe()
   1118             self._eval_data_handler = data_adapter.DataHandler(
   1119                 x=val_x,

/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/utils/tf_inspect.py in currentframe()
     93 def currentframe():
     94   """TFDecorator-aware replacement for inspect.currentframe."""
---> 95   return _inspect.stack()[1][0]
     96 
     97 

/opt/conda/lib/python3.7/inspect.py in stack(context)
   1511 def stack(context=1):
   1512     """Return a list of records for the stack above the caller's frame."""
-> 1513     return getouterframes(sys._getframe(1), context)
   1514 
   1515 def trace(context=1):

/opt/conda/lib/python3.7/inspect.py in getouterframes(frame, context)
   1488     framelist = []
   1489     while frame:
-> 1490         frameinfo = (frame,) + getframeinfo(frame, context)
   1491         framelist.append(FrameInfo(*frameinfo))
   1492         frame = frame.f_back

/opt/conda/lib/python3.7/inspect.py in getframeinfo(frame, context)
   1462         start = lineno - 1 - context//2
   1463         try:
-> 1464             lines, lnum = findsource(frame)
   1465         except OSError:
   1466             lines = index = None

/opt/conda/lib/python3.7/inspect.py in findsource(object)
    826         pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
    827         while lnum > 0:
--> 828             if pat.match(lines[lnum]): break
    829             lnum = lnum - 1
    830         return lines, lnum

IndexError: list index out of range

If I run the line below without validation data, everything works fine; but after adding validation data I get the "IndexError: list index out of range" error:

model.fit(X_train, Y_train, batch_size=32, epochs=10)

Even model.fit(X_train, Y_train, batch_size=32, epochs=10, validation_split=0.3) gives the same error.

There seems to be something wrong with how the validation data is being handled.

1

1 Answers

0
votes

You have 3012 samples and a batch size of 32, so it takes 3012/32 = 94.125 steps to process one full pass over your data each epoch. Since the number of steps must be an integer, set steps_per_epoch=94 in model.fit. That way you should not run out of data within an epoch or get an index error. From the output you showed, model.fit is trying to do 95 steps — that is the problem. By the way, here is a useful bit of code I use to calculate a batch size and step count such that batch_size * steps equals the total number of samples. In the code below, length is the number of samples you have, and b_max is the maximum batch size you will allow based on your memory capacity.

# Pick the largest divisor of `length` that does not exceed `b_max`, so
# batch_size * steps == length exactly and no partial batch is needed.
# NOTE: if `length` is prime, the only divisor <= b_max is 1, so
# batch_size will be 1 and steps will equal length.
length = 3012
b_max = 40
# Integer floor-division (//) — every candidate is an exact divisor, so
# this is exact; the original used int(length/n), which round-trips
# through float needlessly. Also fixed a stray leading space on the
# `steps` line that made the snippet an IndentationError when pasted.
batch_size = sorted(
    [length // n for n in range(1, length + 1) if length % n == 0 and length // n <= b_max],
    reverse=True,
)[0]
steps = length // batch_size
# result: batch_size = 12, steps = 251  (3012 = 2^2 * 3 * 251)

What the code does is search for the factors of length, then pick the largest factor less than or equal to b_max as the batch size. Note that if length is a prime number, the batch size will be 1 and steps will equal length. Alternatively, if you want to keep it simple, set the steps to:

steps_per_epoch=length//batch_size
The same issue applies to validation_steps: set it the same way from your validation-set size.