mnist imshow
import numpy as np
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data() # do the images and labels automatically go into x and y?
print(x_train.shape, y_train.shape) # (60000, 28, 28) -- why is there an extra dimension at the end? because each sample is an image
print(x_test.shape, y_test.shape)
print(x_train[0])
print(y_train[0])
'''
(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 ... (rest of the 28x28 pixel array and the printed label truncated)
'''
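A quick check (an addition, not part of the original notes) that answers the two questions in the comments above: load_data() does return the images in x and the labels in y, and (60000, 28, 28) means 60,000 samples that are each a 28x28 pixel image.
print(x_train[0].shape)    # (28, 28) -> a single image
print(x_train.dtype)       # uint8, pixel values 0~255
print(np.unique(y_train))  # the labels are the digits 0~9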
fashion_mnist
import numpy as np
from keras.datasets import fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
print(x_train[0])
print(y_train[0])
'''
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 1us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 1s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 0s 0us/step
(60000, 28, 28) (60000,)
(10000, 28, 28) (10000,)
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 13 73 0
0 1 4 0 0 0 0 1 1 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 3 0 36 136 127 62
54 0 0 0 1 3 4 0 0 3]
[ 0 0 0 0 0 0 0 0 0 0 0 0 6 0 102 204 176 134
144 123 23 0 0 0 0 12 10 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 155 236 207 178
107 156 161 109 64 23 77 130 72 15]
[ 0 0 0 0 0 0 0 0 0 0 0 1 0 69 207 223 218 216
216 163 127 121 122 146 141 88 172 66]
[ 0 0 0 0 0 0 0 0 0 1 1 1 0 200 232 232 233 229
223 223 215 213 164 127 123 196 229 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 183 225 216 223 228
235 227 224 222 224 221 223 245 173 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 193 228 218 213 198
180 212 210 211 213 223 220 243 202 0]
[ 0 0 0 0 0 0 0 0 0 1 3 0 12 219 220 212 218 192
169 227 208 218 224 212 226 197 209 52]
[ 0 0 0 0 0 0 0 0 0 0 6 0 99 244 222 220 218 203
198 221 215 213 222 220 245 119 167 56]
[ 0 0 0 0 0 0 0 0 0 4 0 0 55 236 228 230 228 240
232 213 218 223 234 217 217 209 92 0]
[ 0 0 1 4 6 7 2 0 0 0 0 0 237 226 217 223 222 219
222 221 216 223 229 215 218 255 77 0]
[ 0 3 0 0 0 0 0 0 0 62 145 204 228 207 213 221 218 208
211 218 224 223 219 215 224 244 159 0]
[ 0 0 0 0 18 44 82 107 189 228 220 222 217 226 200 205 211 230
224 234 176 188 250 248 233 238 215 0]
[ 0 57 187 208 224 221 224 208 204 214 208 209 200 159 245 193 206 223
255 255 221 234 221 211 220 232 246 0]
[ 3 202 228 224 221 211 211 214 205 205 205 220 240 80 150 255 229 221
188 154 191 210 204 209 222 228 225 0]
[ 98 233 198 210 222 229 229 234 249 220 194 215 217 241 65 73 106 117
168 219 221 215 217 223 223 224 229 29]
[ 75 204 212 204 193 205 211 225 216 185 197 206 198 213 240 195 227 245
239 223 218 212 209 222 220 221 230 67]
[ 48 203 183 194 213 197 185 190 194 192 202 214 219 221 220 236 225 216
199 206 186 181 177 172 181 205 206 115]
[ 0 122 219 193 179 171 183 196 204 210 213 207 211 210 200 196 194 191
195 191 198 192 176 156 167 177 210 92]
[ 0 0 74 189 212 191 175 172 175 181 185 188 189 188 193 198 204 209
210 210 211 188 188 194 192 216 170 0]
[ 2 0 0 0 66 200 222 237 239 242 246 243 244 221 220 193 191 179
182 182 181 176 166 168 99 58 0 0]
[ 0 0 0 0 0 0 0 40 61 44 72 41 35 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0]]
9
'''
import matplotlib.pyplot as plt
plt.imshow(x_train[0], 'gray') # without 'gray' the image is drawn with the default color map
plt.show()
cifar10
import numpy as np
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
print(x_train[0])
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 25s 0us/step
(50000, 32, 32, 3) (50000, 1)
(10000, 32, 32, 3) (10000, 1)
[[[ 59 62 63]
[ 43 46 45]
[ 50 48 43]
##
(50000, 32, 32, 3): the trailing 3 is the color channel
each pixel is one [R G B] triple, e.g. [ 59 62 63] means R=59, G=62, B=63
(the next pixels follow: [ 43 46 45], [ 50 48 43], ...)
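A small added check (not in the original notes): indexing one pixel of the first image shows that the last axis holds that pixel's R, G, B values.
r, g, b = x_train[0][0][0]   # top-left pixel of the first training image
print(r, g, b)               # 59 62 63 -- matches the first entry of the printed array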
imshow2 - (didn't fully understand this one yet)
import numpy as np
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
class_names = ['0','1','2','3','4','5','6','7','8','9']
------------
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)                       # place each digit in a 5x5 grid
    plt.xticks([])                             # hide the tick marks
    plt.yticks([])
    plt.grid(False)                            # Boolean True/False must be capitalized
    plt.imshow(x_train[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[y_train[i]])        # class name under each image
plt.show()
cifar10 - view the color images with their class names
import numpy as np
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
class_names = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck']
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(x_train[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[y_train[i][0]])     # cifar10 labels have shape (1,), so take [0]
plt.show()
tf01_cnn_mnist
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D
#1. Data
datasets=mnist
(x_train, y_train), (x_test, y_test) = datasets.load_data()
# print(datasets.load_data())
# Normalization = rescale values into the 0~1 range; for image data, dividing by 255 is enough
x_train, x_test = x_train/255.0, x_test/255.0
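# (added check, not in the original) the pixels are uint8 0~255 before scaling
# and floats 0.0~1.0 afterwards; verify with:
# print(x_train.min(), x_train.max())   # -> 0.0 1.0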
#2. Model
model = Sequential()
model.add(Flatten(input_shape=(28,28)))  # flatten each 28x28 image into a 784-length vector
model.add(Dense(256))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
#3. Compile, train
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=256)
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test)
print('loss: ', loss)
print('acc: ', acc)
#################
loss: 33.30949401855469
acc: 0.9764999747276306
cnn_ fashionMnist
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D
from keras.datasets import fashion_mnist
#1. Data
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
# print(x_train.shape, y_train.shape) #(60000, 28, 28) (60000,)
# print(x_test.shape, y_test.shape) #(10000, 28, 28) (10000,)
# reshape: add a channel dimension, since Conv2D expects inputs of shape (height, width, channels)
x_train = x_train.reshape(60000,28,28,1)
x_test = x_test.reshape(10000,28,28,1)
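# (added note, not in the original) the reshape only adds a channel axis:
# print(x_train.shape)   # (60000, 28, 28, 1)
# np.expand_dims(x_train, axis=-1) would give the same shape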
#[Practice]
#2. Model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3,3), input_shape=(28,28,1)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
#3. Compile, train
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=256)
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test)
print('loss : ', loss)
print('acc : ', acc)
###
# loss : 0.4495483934879303
# acc : 0.9068999886512756
cnn_fashionMnist - max pooling, dropout added
Dropout: randomly drops a fraction of the nodes during training (helps prevent overfitting)
MaxPooling: keeps only the largest value in each pooling window, shrinking the feature map
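Below is a minimal added sketch (not part of the original notes) of what MaxPooling2D(2,2) does: each non-overlapping 2x2 window of the input is replaced by its largest value, halving the height and width.
import numpy as np
from keras.models import Sequential
from keras.layers import MaxPooling2D

x = np.array([[1, 2, 0, 1],
              [3, 4, 1, 0],
              [0, 1, 5, 6],
              [2, 0, 7, 8]], dtype='float32').reshape(1, 4, 4, 1)  # (batch, H, W, channels)

pool = Sequential([MaxPooling2D(pool_size=(2, 2), input_shape=(4, 4, 1))])
print(pool.predict(x).reshape(2, 2))
# [[4. 1.]
#  [2. 8.]]  <- the maximum of each 2x2 block
Dropout(0.2), used in the model below, instead zeroes a random 20% of the activations during training only; all nodes are active again at evaluation time.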
##from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.datasets import fashion_mnist
#1. Data
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
# print(x_train.shape, y_train.shape) #(60000, 28, 28) (60000,)
# print(x_test.shape, y_test.shape) #(10000, 28, 28) (10000,)
# reshape
x_train = x_train.reshape(60000,28,28,1)
x_test = x_test.reshape(10000,28,28,1)
#[Practice]
#2. Model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3,3),
                input_shape=(28,28,1), activation='relu'))  # activation='relu' added
model.add(MaxPooling2D(2,2))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.2)) # drop rate as a fraction: 0.2 = 20% of nodes dropped during training
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
#3. Compile, train
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=256)
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test)
print('loss : ', loss)
print('acc : ', acc)
###
# before MaxPooling / Dropout
# loss : 0.4495483934879303
# acc : 0.9068999886512756
# MaxPooling2D(2,2), 2 layers, Dropout(0.2)
# loss : 0.24207457900047302
# acc : 0.9121000170707703
# MaxPooling2D(2,2), 1 layer, Dropout(0.3)
# loss : 0.2307022511959076
# acc : 0.9283000230789185
cifar10 maxpooling
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.datasets import cifar10
import time
#1. Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data() # x: images, y: labels
print(x_train.shape, y_train.shape)#(50000, 32, 32, 3) (50000, 1)
print(x_test.shape, y_test.shape)#(10000, 32, 32, 3) (10000, 1)
# Normalization
x_train = x_train/255.0
x_test = x_test/255.0
#2. Model
model= Sequential()
model.add(Conv2D(filters=64, kernel_size=(3,3),
padding='same',
activation='relu',
input_shape=(32,32,3)))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.25))
model.add(Conv2D(32, (3,3), padding='same', activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
#3. Compile, train
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
start_time = time.time()
model.fit(x_train, y_train, epochs=10, batch_size=128)
end_time= time.time() - start_time
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test)
print('loss: ', loss)
print('acc :', acc)
print('elapsed time : ', end_time)
######
loss: 0.9302626252174377
acc : 0.6725999712944031
elapsed time : 388.4362733364105
cifar100 maxpooling, Dropout, Conv2D
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.datasets import cifar100
import time
#1. Data
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
#(50000, 32, 32, 3) (50000, 1)
#(10000, 32, 32, 3) (10000, 1)
# Normalization
x_train = x_train/255.0
x_test= x_test/255.0
#2. Model
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu',
input_shape=(32,32,3)))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3,3), padding='same', activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(100, activation='softmax'))
#3. Compile, train
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
start_time = time.time()
model.fit(x_train,y_train, epochs=10, batch_size=256)
end_time = time.time() - start_time
#4. Evaluate, predict
loss, acc = model.evaluate(x_test, y_test)
print('loss :', loss)
print('acc: ', acc)
print('elapsed time: ', end_time)
# loss : 2.381784439086914
# acc: 0.3955000042915344
# elapsed time: 333.3105571269989