TechTogetWorld

This post is about implementing artificial intelligence.

The post is organized as follows.

================================================

1. lab-10-1-mnist_softmax
   Softmax classifier ==> accuracy 90%

2. lab-10-2-mnist_nn
   3-layer network with ReLU applied ==> accuracy 94%

3. lab-10-3-mnist_nn_xavier
   Xavier initialization applied (weight initialization) ==> accuracy 97.6%

4. lab-10-4-mnist_nn_deep
   Wider (512 units) and deeper (5 layers) ==> no accuracy improvement
   Cause: overfitting. Countermeasure: apply dropout

5. lab-10-5-mnist_nn_dropout
   Dropout applied ==> accuracy 97.96%

6. Further code to explore (additional)
   lab-10-6-mnist_nn_batchnorm
   lab-10-7-mnist_nn_higher_level_API
   lab-10-8-mnist_nn_selu(wip)
   lab-10-X1-mnist_back_prop

7. References

=================================================
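
All five labs below share the same skeleton: load MNIST (28x28 images flattened to 784 inputs, 10 digit classes, 55,000 training examples), build a graph with placeholders, train with the Adam optimizer over mini-batches of 100 for 15 epochs (550 batches per epoch), and report test accuracy. The code is written against the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.contrib) and the old tensorflow.examples.tutorials.mnist loader, both of which were removed in TensorFlow 2.x. The snippet below is only a hedged sketch of the usual compatibility shim; even with it, labs 10-3 through 10-5 would still need replacements for tf.contrib.layers.xavier_initializer and the MNIST loader.

# Hypothetical TensorFlow 2.x compatibility shim (an assumption, not part of the labs).
# It restores graph-mode execution so tf.placeholder / tf.Session work again,
# but tf.contrib and tensorflow.examples.tutorials.mnist remain unavailable in TF 2.x.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # run the graph with an explicit Session, as in TF 1.x
tf.set_random_seed(777)       # reproducibility, matching the labs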


[ lab-10-1-mnist_softmax ]

# lab-10-1-mnist_softmax
# Accuracy: 90%

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Lab 7 Learning rate and Evaluation
import tensorflow as tf
import random
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.random_normal([10]))

hypothesis = tf.matmul(X, W) + b

# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

# plt.imshow(mnist.test.images[r:r + 1].
#           reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 5.888845987
Epoch: 0002 cost = 1.860620173
Epoch: 0003 cost = 1.159035648
Epoch: 0004 cost = 0.892340870
Epoch: 0005 cost = 0.751155428
Epoch: 0006 cost = 0.662484806
Epoch: 0007 cost = 0.601544010
Epoch: 0008 cost = 0.556526115
Epoch: 0009 cost = 0.521186961
Epoch: 0010 cost = 0.493068354
Epoch: 0011 cost = 0.469686249
Epoch: 0012 cost = 0.449967254
Epoch: 0013 cost = 0.433519321
Epoch: 0014 cost = 0.419000337
Epoch: 0015 cost = 0.406490815
Learning Finished!
Accuracy: 0.9035
'''
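
For reference, tf.nn.softmax_cross_entropy_with_logits takes the raw scores tf.matmul(X, W) + b, applies softmax internally, and measures cross-entropy against the one-hot labels, which is why the code never calls tf.nn.softmax itself. The NumPy sketch below reproduces the same computation for illustration only; it is not the library's implementation.

# Minimal NumPy sketch of softmax cross-entropy on raw logits (illustration only).
import numpy as np

def softmax_cross_entropy(logits, one_hot_labels):
    # subtract the row-wise max for numerical stability before exponentiating
    shifted = logits - logits.max(axis=1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    # cross-entropy: -sum(y * log(p)) per example, then mean over the batch
    return -np.sum(one_hot_labels * np.log(probs + 1e-12), axis=1).mean()

logits = np.array([[2.0, 1.0, 0.1]])
labels = np.array([[1.0, 0.0, 0.0]])
print(softmax_cross_entropy(logits, labels))  # ~0.417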


[ lab-10-2-mnist_nn ]

# lab-10-2-mnist_nn
# 3-layer network with ReLU applied ==> accuracy 94%

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Lab 10 MNIST and NN
import tensorflow as tf
import random
# import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3

# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

# plt.imshow(mnist.test.images[r:r + 1].
#           reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 141.207671860
Epoch: 0002 cost = 38.788445864
Epoch: 0003 cost = 23.977515479
Epoch: 0004 cost = 16.315132428
Epoch: 0005 cost = 11.702554882
Epoch: 0006 cost = 8.573139748
Epoch: 0007 cost = 6.370995680
Epoch: 0008 cost = 4.537178684
Epoch: 0009 cost = 3.216900532
Epoch: 0010 cost = 2.329708954
Epoch: 0011 cost = 1.715552875
Epoch: 0012 cost = 1.189857912
Epoch: 0013 cost = 0.820965160
Epoch: 0014 cost = 0.624131458
Epoch: 0015 cost = 0.454633765
Learning Finished!
Accuracy: 0.9455
'''
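
The gain from 90% to about 94.5% comes from the two hidden layers with ReLU activations. The very large first-epoch cost (141.2) is likely a side effect of initializing the weights with tf.random_normal, whose default standard deviation of 1.0 is large for layers this wide; lab-10-3 addresses exactly that. As a reminder of what tf.nn.relu does in the forward and backward pass, here is a minimal NumPy sketch (illustration only):

# ReLU and its derivative, as used implicitly by tf.nn.relu during backprop.
import numpy as np

def relu(x):
    return np.maximum(0.0, x)

def relu_grad(x):
    # gradient is 1 where the unit is active, 0 where it is clamped to zero
    return (x > 0).astype(float)

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(relu(x))       # [0.  0.  0.  0.5 2. ]
print(relu_grad(x))  # [0. 0. 0. 1. 1.]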

[ lab-10-3-mnist_nn_xavier ]

# lab-10-3-mnist_nn_xavier
# Xavier initialization applied ==> accuracy 97.6%
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Lab 10 MNIST and Xavier
import tensorflow as tf
import random
# import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
# http://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
W1 = tf.get_variable("W1", shape=[784, 256],
                     initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.get_variable("W2", shape=[256, 256],
                     initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.get_variable("W3", shape=[256, 10],
                     initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b3

# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

# plt.imshow(mnist.test.images[r:r + 1].
#           reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 0.301498963
Epoch: 0002 cost = 0.107252513
Epoch: 0003 cost = 0.064888892
Epoch: 0004 cost = 0.044463030
Epoch: 0005 cost = 0.029951642
Epoch: 0006 cost = 0.020663404
Epoch: 0007 cost = 0.015853033
Epoch: 0008 cost = 0.011764387
Epoch: 0009 cost = 0.008598264
Epoch: 0010 cost = 0.007383116
Epoch: 0011 cost = 0.006839140
Epoch: 0012 cost = 0.004672963
Epoch: 0013 cost = 0.003979437
Epoch: 0014 cost = 0.002714260
Epoch: 0015 cost = 0.004707661
Learning Finished!
Accuracy: 0.9783
'''
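
Xavier (Glorot) initialization chooses the initial weight scale from each layer's fan-in and fan-out so that activation and gradient variances stay roughly constant across layers; with it, the first-epoch cost drops from about 141 (lab-10-2) to about 0.30 and test accuracy reaches 0.9783. The sketch below is a hand-rolled approximation of the idea; tf.contrib.layers.xavier_initializer defaults, as far as I recall, to the uniform Glorot variant.

# Hand-rolled Glorot/Xavier uniform initializer (sketch of the idea only).
import numpy as np

def xavier_uniform(fan_in, fan_out):
    limit = np.sqrt(6.0 / (fan_in + fan_out))  # Glorot uniform bound
    return np.random.uniform(-limit, limit, size=(fan_in, fan_out))

W1_init = xavier_uniform(784, 256)
print(W1_init.std())  # roughly sqrt(2 / (784 + 256)) ~ 0.044, far smaller than stddev 1.0
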
[ lab-10-4-mnist_nn_deep ]

# lab-10-4-mnist_nn_deep
# Wider (512 units) and deeper (5 layers) ==> no accuracy improvement
# Cause: overfitting. Countermeasure: apply dropout

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Lab 10 MNIST and Deep learning
import tensorflow as tf
import random
# import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# weights & bias for nn layers
# http://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
W1 = tf.get_variable("W1", shape=[784, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.get_variable("W2", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)

W3 = tf.get_variable("W3", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)

W4 = tf.get_variable("W4", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)

W5 = tf.get_variable("W5", shape=[512, 10],
                     initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b5

# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))

# plt.imshow(mnist.test.images[r:r + 1].
#           reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 0.266061549
Epoch: 0002 cost = 0.080796588
Epoch: 0003 cost = 0.049075800
Epoch: 0004 cost = 0.034772298
Epoch: 0005 cost = 0.024780529
Epoch: 0006 cost = 0.017072763
Epoch: 0007 cost = 0.014031383
Epoch: 0008 cost = 0.013763446
Epoch: 0009 cost = 0.009164047
Epoch: 0010 cost = 0.008291388
Epoch: 0011 cost = 0.007319742
Epoch: 0012 cost = 0.006434021
Epoch: 0013 cost = 0.005684378
Epoch: 0014 cost = 0.004781207
Epoch: 0015 cost = 0.004342310
Learning Finished!
Accuracy: 0.9742
'''
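
Despite being wider and deeper, this network ends at 0.9742 test accuracy, slightly below lab-10-3's 0.9783, even though its training cost falls much lower. That gap between performance on data the model has seen and data it has not is the overfitting referred to above. One way to make the gap visible is to evaluate the held-out validation split at the end of each epoch; the sketch below is an assumption-laden addition that reuses the lab's sess, accuracy, X, and Y names (with the accuracy node defined before the training loop rather than after it) and the mnist.validation split returned by input_data.read_data_sets.

# Hypothetical per-epoch validation check, reusing the lab's graph nodes (assumption).
# Training cost that keeps falling while validation accuracy stalls is the usual
# symptom of overfitting; dropout in lab-10-5 is one countermeasure.
val_acc = sess.run(accuracy, feed_dict={X: mnist.validation.images,
                                        Y: mnist.validation.labels})
print('Epoch:', '%04d' % (epoch + 1), 'validation accuracy =', '{:.4f}'.format(val_acc))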


[ lab-10-5-mnist_nn_dropout ]

# lab-10-5-mnist_nn_dropout
# Dropout applied: accuracy 97.96%
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Lab 10 MNIST and Dropout
import tensorflow as tf
import random
# import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

tf.set_random_seed(777)  # reproducibility

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# input place holders
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# dropout (keep_prob) rate  0.7 on training, but should be 1 for testing
keep_prob = tf.placeholder(tf.float32)

# weights & bias for nn layers
# http://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
W1 = tf.get_variable("W1", shape=[784, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([512]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)

W2 = tf.get_variable("W2", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)

W3 = tf.get_variable("W3", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)

W4 = tf.get_variable("W4", shape=[512, 512],
                     initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)

W5 = tf.get_variable("W5", shape=[512, 10],
                     initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L4, W5) + b5

# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# train my model
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)

    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys, keep_prob: 0.7}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch

    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

print('Learning Finished!')

# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1}))

# plt.imshow(mnist.test.images[r:r + 1].
#           reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()

'''
Epoch: 0001 cost = 0.447322626
Epoch: 0002 cost = 0.157285590
Epoch: 0003 cost = 0.121884535
Epoch: 0004 cost = 0.098128681
Epoch: 0005 cost = 0.082901778
Epoch: 0006 cost = 0.075337573
Epoch: 0007 cost = 0.069752543
Epoch: 0008 cost = 0.060884363
Epoch: 0009 cost = 0.055276413
Epoch: 0010 cost = 0.054631256
Epoch: 0011 cost = 0.049675195
Epoch: 0012 cost = 0.049125314
Epoch: 0013 cost = 0.047231930
Epoch: 0014 cost = 0.041290121
Epoch: 0015 cost = 0.043621063
Learning Finished!
Accuracy: 0.9804
'''
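
tf.nn.dropout implements what is often called inverted dropout: during training it zeroes each activation with probability 1 - keep_prob and scales the survivors by 1/keep_prob, so the expected activation is unchanged and the test-time pass can simply run with keep_prob = 1, exactly as the feed_dict above does. (In later TensorFlow versions the keep_prob argument was replaced by rate = 1 - keep_prob.) A minimal NumPy sketch of the behavior, for illustration only:

# Inverted dropout sketch: drop units with probability (1 - keep_prob) and
# rescale the survivors by 1 / keep_prob so the expected value is preserved.
import numpy as np

def inverted_dropout(activations, keep_prob):
    mask = (np.random.rand(*activations.shape) < keep_prob).astype(activations.dtype)
    return activations * mask / keep_prob

a = np.ones((4, 5))
print(inverted_dropout(a, keep_prob=0.7))  # surviving entries become 1/0.7 ~ 1.43
print(inverted_dropout(a, keep_prob=1.0))  # test-time setting: identity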

[ References ]

https://www.inflearn.com/course/기본적인-머신러닝-딥러닝-강좌

https://github.com/hunkim/deeplearningzerotoall