"""Train a VGG-style CNN on CIFAR-10 (conv-conv-pool stages with batch norm)."""

import tensorflow as tf
import numpy as np
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
import keras as k
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD, Adam
from keras.regularizers import l2
import h5py
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator

# ----- load data -----
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
img_rows, img_cols, channels = 32, 32, 3

# Optional preview of the first few training images:
# for i in range(0, 9):
#     plt.subplot(330 + 1 + i)
#     plt.imshow(x_train[i])
# plt.show()

# Ensure the arrays are shaped (samples, rows, cols, channels).
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
# BUG FIX: the original set input_shape = (img_rows, img_cols, 1), which does
# not match the 3-channel CIFAR-10 images reshaped above; use `channels`.
input_shape = (img_rows, img_cols, channels)

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert integers to float; normalise and center the mean.  The statistics
# are computed on the training set only and applied to both splits.
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
mean = np.mean(x_train)
std = np.std(x_train)
x_test = (x_test - mean) / std
x_train = (x_train - mean) / std

# One-hot encode the integer class labels.
num_classes = 10
y_train = k.utils.to_categorical(y_train, num_classes)
y_test = k.utils.to_categorical(y_test, num_classes)

# ----- build and compile the model (roughly following the VGG paper) -----
# reg = l2(1e-4)  # L2 or "ridge" regularisation
reg = None
num_filters = 32
ac = 'relu'
adm = Adam(lr=0.001, decay=0, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
opt = adm
drop_dense = 0.5  # dropout rate after the dense layer
drop_conv = 0     # dropout rate after each conv stage (currently disabled)

model = Sequential()

# Stage 1: two 3x3 convs, 32 filters each; 32x32 -> 16x16 after pooling.
model.add(Conv2D(num_filters, (3, 3), activation=ac, kernel_regularizer=reg,
                 input_shape=input_shape, padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(num_filters, (3, 3), activation=ac, kernel_regularizer=reg,
                 padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))  # reduces to 16x16
model.add(Dropout(drop_conv))

# Stage 2: two 3x3 convs, 64 filters each; 16x16 -> 8x8 after pooling.
model.add(Conv2D(2 * num_filters, (3, 3), activation=ac, kernel_regularizer=reg,
                 padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(2 * num_filters, (3, 3), activation=ac, kernel_regularizer=reg,
                 padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))  # reduces to 8x8
model.add(Dropout(drop_conv))

# Stage 3: two 3x3 convs, 128 filters each; 8x8 -> 4x4 after pooling.
model.add(Conv2D(4 * num_filters, (3, 3), activation=ac, kernel_regularizer=reg,
                 padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Conv2D(4 * num_filters, (3, 3), activation=ac, kernel_regularizer=reg,
                 padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(MaxPooling2D(pool_size=(2, 2)))  # reduces to 4x4
model.add(Dropout(drop_conv))

# Classifier head: flatten -> 512-unit dense -> softmax over 10 classes.
model.add(Flatten())
model.add(Dense(512, activation=ac, kernel_regularizer=reg))
model.add(BatchNormalization())
model.add(Dropout(drop_dense))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
              optimizer=opt)

# Print model summary.
model.summary()