ai_platform_cv/mnist/MNIST_cnn.py

import os.path
import numpy as np
import tensorflow as tf
# Allow GPU memory to grow on demand instead of pre-allocating it all,
# and skip the setting when no GPU is present so the script still runs on CPU
physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
num_classes = 10
input_shape = (28, 28, 1)

def pre_process():
    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    # Scale images to the [0, 1] range
    x_train = x_train.astype("float32") / 255
    x_test = x_test.astype("float32") / 255
    # Make sure images have shape (28, 28, 1)
    x_train = np.expand_dims(x_train, -1)
    x_test = np.expand_dims(x_test, -1)
    # convert class vectors to binary class matrices
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
    return x_train, y_train, x_test, y_test

def createModel(neure, kernel_size, pool_size, activation):
    model = tf.keras.Sequential(
        [
            tf.keras.Input(shape=input_shape),
            tf.keras.layers.Conv2D(neure, kernel_size=(kernel_size, kernel_size), activation=activation),
            tf.keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(num_classes, activation="softmax"),
        ]
    )
    model.summary()
    return model

def trainModel(model, x_train, y_train, epochs, loss):
    batch_size = 128
    model.compile(loss=loss, optimizer="adam", metrics=["accuracy"])
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
    return model

# Model prediction
def predictModel(x_test, model):
    predicted_data = model.predict(x_test)
    return predicted_data

def train(neure, kernel_size, pool_size, activation, epochs, loss):
    x_train, y_train, x_test, y_test = pre_process()
    model = createModel(neure, kernel_size, pool_size, activation)
    model = trainModel(model, x_train, y_train, epochs, loss)
    # evaluate() returns [loss, accuracy]; keep the accuracy for the caller
    score = model.evaluate(x_test, y_test, verbose=0)
    accuracy = score[1]
    model.save(os.path.abspath("./appweb/self_model/mnist_cnn.h5"))
    return accuracy
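

# Example usage: a minimal sketch of how this module might be driven directly.
# The hyperparameter values below are illustrative assumptions, not part of the
# original module.
if __name__ == "__main__":
    # 32 conv filters, 3x3 kernels, 2x2 pooling, ReLU activation, 5 epochs,
    # and categorical cross-entropy loss (labels are one-hot encoded above).
    acc = train(neure=32, kernel_size=3, pool_size=2,
                activation="relu", epochs=5, loss="categorical_crossentropy")
    print("Test accuracy: {:.4f}".format(acc))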