TensorFlow Basics 7 - Keras XOR (Using Multiple Neurons (Nodes))
코딩탕탕
2022. 11. 29. 13:14
# Among the logic gates, XOR requires multiple neurons (nodes)
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
# Build a logic-gate classification model
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
print(x)
y = np.array([0, 1, 1, 0]) # xor
model = Sequential()
"""
model.add(Dense(units=5, input_dim=2))
model.add(Activation('relu')) # hidden layer
# model.add(Dense(units=5))
# model.add(Activation('relu'))
model.add(Dense(units=1))
model.add(Activation('sigmoid'))
"""
# model.add(Flatten(input_shape=(2, )))
# model.add(Dense(units=5, activation='relu'))
model.add(Dense(units=5, input_dim=2, activation='relu')) # the two lines above written as one
# model.add(Dense(units=5, input_shape=(2, ), activation='relu')) # same as above
model.add(Dense(units=5, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
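# For reference, the same network in the functional API (an equivalent sketch, kept commented out):
# from keras.models import Model
# from keras.layers import Input
# inp = Input(shape=(2,))
# h = Dense(units=5, activation='relu')(inp)
# h = Dense(units=5, activation='relu')(h)
# out = Dense(units=1, activation='sigmoid')(h)
# model = Model(inp, out)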
print(model.summary()) # check the layers and parameters of the designed model
# None in the Output Shape means any value (batch size) is accepted
# Param # = (number of input neurons (input_dim) + 1 bias) * number of output neurons
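# Checking that formula against the summary printed below:
# dense   : (2 + 1) * 5 = 15
# dense_1 : (5 + 1) * 5 = 30
# dense_2 : (5 + 1) * 1 = 6   -> total 51 trainable parameters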
model.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(x, y, epochs=100, batch_size=1, verbose=0) # batch_size: number of samples grouped per weight update
print('history :', history.history['loss']) # loss values recorded during training
print('history :', history.history['accuracy']) # accuracy values recorded during training
loss_metrics = model.evaluate(x, y) # check the final trained metrics
print('loss :', loss_metrics[0], 'acc :', loss_metrics[1])
pred = (model.predict(x) > 0.5).astype('int32')
print('Prediction :', pred.flatten())
print()
print(model.input) # the model's input tensor
print(model.output) # the model's output tensor
print(model.weights) # kernel (weight) and bias values
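# For illustration (a sketch using only NumPy): reproduce the model's forward
# pass by hand with the weights printed above, to confirm what each layer computes.
w1, b1, w2, b2, w3, b3 = [v.numpy() for v in model.weights]
relu = lambda z: np.maximum(z, 0)
sigmoid = lambda z: 1 / (1 + np.exp(-z))
manual = sigmoid(relu(relu(x @ w1 + b1) @ w2 + b2) @ w3 + b3)
print('manual forward pass :', (manual > 0.5).astype('int32').flatten()) # should match model.predict(x)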
# visualize the history values
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['accuracy'], label='train acc')
plt.xlabel('epochs')
plt.legend() # without this call the labels above are not displayed
plt.show()
<console>
[[0 0]
[0 1]
[1 0]
[1 1]]
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 5) 15
dense_1 (Dense) (None, 5) 30
dense_2 (Dense) (None, 1) 6
=================================================================
Total params: 51
Trainable params: 51
Non-trainable params: 0
_________________________________________________________________
None
history : [0.730238139629364, 0.7154098153114319, 0.7100121974945068, 0.6942211985588074, 0.6912755966186523, 0.6771771311759949, 0.6702845692634583, 0.6661607623100281, 0.6569141745567322, 0.6533308029174805, 0.6424304842948914, 0.6368405222892761, 0.6287590265274048, 0.616847813129425, 0.6100606918334961, 0.6020330190658569, 0.5957791805267334, 0.5922914743423462, 0.5805368423461914, 0.5739890933036804, 0.5649680495262146, 0.5537996292114258, 0.5418347716331482, 0.5319989919662476, 0.5223968625068665, 0.509902834892273, 0.5025970935821533, 0.4874396324157715, 0.48330748081207275, 0.46904534101486206, 0.45898550748825073, 0.44712939858436584, 0.43461304903030396, 0.42407849431037903, 0.4099261462688446, 0.4007248282432556, 0.38443583250045776, 0.3751389682292938, 0.3621520698070526, 0.34787824749946594, 0.33386707305908203, 0.32321491837501526, 0.3109804689884186, 0.2967110574245453, 0.28243082761764526, 0.2690805196762085, 0.2580115497112274, 0.24253058433532715, 0.23164182901382446, 0.2171609103679657, 0.20592109858989716, 0.1914011836051941, 0.1812133938074112, 0.16990183293819427, 0.15902861952781677, 0.1498274803161621, 0.14376214146614075, 0.133173406124115, 0.1235436275601387, 0.11740739643573761, 0.10922440886497498, 0.10136499255895615, 0.09402070194482803, 0.08929584920406342, 0.08628110587596893, 0.07867847383022308, 0.07471789419651031, 0.07303788512945175, 0.0680709034204483, 0.0635533258318901, 0.06030545011162758, 0.05617605894804001, 0.05365687608718872, 0.05149179697036743, 0.04786583036184311, 0.04679875820875168, 0.04433054104447365, 0.043185122311115265, 0.04067330062389374, 0.038394421339035034, 0.03704328089952469, 0.03529468923807144, 0.03360763192176819, 0.03227853775024414, 0.030598875135183334, 0.029467450454831123, 0.028173863887786865, 0.027079960331320763, 0.026634931564331055, 0.025396810844540596, 0.024584736675024033, 0.02362598106265068, 0.02287617325782776, 0.02235300838947296, 0.02168215624988079, 0.020772522315382957, 0.020062701776623726, 0.019421666860580444, 0.018669623881578445, 0.01799682527780533]
history : [0.5, 0.5, 0.25, 0.25, 0.25, 0.5, 0.5, 0.5, 0.5, 0.5, 0.75, 0.75, 0.75, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
1/1 [==============================] - ETA: 0s - loss: 0.0176 - accuracy: 1.0000
1/1 [==============================] - 0s 88ms/step - loss: 0.0176 - accuracy: 1.0000
loss : 0.01760084182024002 acc : 1.0
1/1 [==============================] - ETA: 0s
1/1 [==============================] - 0s 40ms/step
Prediction : [0 1 1 0]
KerasTensor(type_spec=TensorSpec(shape=(None, 2), dtype=tf.float32, name='dense_input'), name='dense_input', description="created by layer 'dense_input'")
KerasTensor(type_spec=TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), name='dense_2/Sigmoid:0', description="created by layer 'dense_2'")
[<tf.Variable 'dense/kernel:0' shape=(2, 5) dtype=float32, numpy=
array([[ 1.1138238 , -0.08162075, -0.9033398 , 0.8365305 , 1.9727403 ],
[-1.0136162 , -0.14996159, -0.6667481 , 2.1844456 , -0.5413068 ]],
dtype=float32)>, <tf.Variable 'dense/bias:0' shape=(5,) dtype=float32, numpy=
array([-0.00089572, 0. , 0. , 0.00469716, 0.5426557 ],
dtype=float32)>, <tf.Variable 'dense_1/kernel:0' shape=(5, 5) dtype=float32, numpy=
array([[-1.5369235 , -0.42903438, -1.3316493 , -0.37653124, 2.065992 ],
[ 0.3890469 , 0.3396889 , -0.05939364, 0.5432068 , 0.68184185],
[ 0.3147874 , 0.50249064, 0.7047187 , 0.69607663, -0.37269086],
[-1.3446227 , 0.59388936, 1.1210352 , -0.66114014, -0.3911119 ],
[ 0.5690664 , 0.722234 , -2.1595266 , -0.7109815 , 0.67951345]],
dtype=float32)>, <tf.Variable 'dense_1/bias:0' shape=(5,) dtype=float32, numpy=
array([ 1.3922737 , -0.03047287, 0.99146134, 0. , -0.35937154],
dtype=float32)>, <tf.Variable 'dense_2/kernel:0' shape=(5, 1) dtype=float32, numpy=
array([[-1.8706352],
[-1.1388556],
[ 1.6652844],
[-0.5855408],
[ 2.087474 ]], dtype=float32)>, <tf.Variable 'dense_2/bias:0' shape=(1,) dtype=float32, numpy=array([-0.25701573], dtype=float32)>]
input_dim (the number of incoming features) only needs to be specified on the first layer.
relu is the activation function applied to the intermediate hidden layers.
As the loss approaches 0, you can see the accuracy settle at a stable value.
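As a closing check, here is a minimal sketch (assuming the same data, imports, and hyperparameters as above) of why the hidden layers matter: a single sigmoid neuron draws one linear boundary, so it can classify at most 3 of the 4 XOR points correctly and never reaches accuracy 1.0.
single = Sequential()
single.add(Dense(units=1, input_dim=2, activation='sigmoid')) # no hidden layer
single.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
single.fit(x, y, epochs=500, batch_size=1, verbose=0)
print(single.evaluate(x, y, verbose=0)) # accuracy plateaus below 1.0 (typically 0.5 ~ 0.75)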