手写数字识别1.py 2.9 KB

# Import TensorFlow
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
# Load and prepare the MNIST dataset, scaling pixel values to [0, 1]:
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
# Use tf.data to batch and shuffle the dataset:
train_ds = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
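
# Optional sanity check (not part of the original script; the names below are
# illustrative): pull one batch to confirm the expected shapes of
# (32, 28, 28, 1) images and (32,) labels.
for sample_images, sample_labels in train_ds.take(1):
    print('batch shapes:', sample_images.shape, sample_labels.shape)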
# Build the tf.keras model using the Keras model subclassing API:
class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

model = MyModel()
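
# Optional sanity check (an addition, not in the original script): a subclassed
# model only creates its weights once it has been called, so running one dummy
# batch through it lets model.summary() report the layer parameters.
_ = model(tf.zeros([1, 28, 28, 1]))
model.summary()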
# Choose an optimizer and loss function for training:
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()

# Select metrics to measure the model's loss and accuracy. These metrics
# accumulate values over the epochs, and the overall result is printed.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
# Train the model using tf.GradientTape:
@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(labels, predictions)
# Test the model:
@tf.function
def test_step(images, labels):
    predictions = model(images)
    t_loss = loss_object(labels, predictions)

    test_loss(t_loss)
    test_accuracy(labels, predictions)
EPOCHS = 5

for epoch in range(EPOCHS):
    for images, labels in train_ds:
        train_step(images, labels)

    for test_images, test_labels in test_ds:
        test_step(test_images, test_labels)

    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch + 1,
                          train_loss.result(),
                          train_accuracy.result() * 100,
                          test_loss.result(),
                          test_accuracy.result() * 100))
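
# A minimal inference sketch after training (an addition, not in the original
# script; sample_probs/sample_preds are illustrative names): the softmax output
# gives per-class probabilities, and argmax picks the predicted digit.
sample_probs = model(x_test[:5])
sample_preds = tf.argmax(sample_probs, axis=1)
print('Predicted:', sample_preds.numpy(), 'Actual:', y_test[:5])

# Note: the metric objects above accumulate state across epochs, so the printed
# values are running averages rather than per-epoch numbers. If per-epoch values
# are preferred, each metric could be reset at the start of every epoch, e.g.
# train_loss.reset_states(), and likewise for the other three metrics.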