https://teachablemachine.withgoogle.com/
Teachable Machine
Train a computer to recognize your own images, sounds, & poses. A fast, easy way to create machine learning models for your sites, apps, and more – no expertise or coding required.
Several kinds of projects can be created (image, audio, and pose).
[Running an image project locally]
The script below assumes the model has been exported from Teachable Machine in the TensorFlow / Keras format and saved as keras_model.h5 in the working directory.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from keras.models import load_model
import numpy as np
import cv2

capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Class indices as defined in the Teachable Machine project
label_class = { 0: 'Post-it',
                1: 'Smartphone',
                2: 'Remote control' }

# Load the model exported from Teachable Machine
model = load_model('./keras_model.h5')

# Create the array of the right shape to feed into the keras model.
# The first position in the shape tuple is the batch size, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

while cv2.waitKey(33) < 0:
    try:
        ret, frame = capture.read()
        cv2.imshow("VideoFrame", frame)

        # Resize the frame to the 224x224 input size the model expects
        image = cv2.resize(frame, (224, 224))

        # OpenCV delivers frames in BGR order; the model was trained on RGB images
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Turn the image into a numpy array
        image_array = np.asarray(image)

        # Normalize pixel values from [0, 255] to roughly [-1, 1]
        normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

        # Load the image into the batch array
        data[0] = normalized_image_array

        # Run the inference
        predictions = model.predict(data)
        score = tf.nn.softmax(predictions[0])
        print(label_class[np.argmax(score)], 100 * np.max(score))
    except Exception:
        pass

capture.release()
cv2.destroyAllWindows()
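The class indices and names above must match the classes defined in the Teachable Machine project. If you would rather not hardcode them, the Keras export also ships a labels.txt file next to keras_model.h5; a minimal sketch that builds the same label_class dictionary from it, assuming the usual "index name" format with one class per line:

def load_labels(path='./labels.txt'):
    # Parse lines like "0 Post-it" into {0: 'Post-it'}
    labels = {}
    with open(path, encoding='utf-8') as f:
        for line in f:
            index, name = line.strip().split(maxsplit=1)
            labels[int(index)] = name
    return labels

label_class = load_labels()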
[A more accurate version]
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from keras.models import load_model
import numpy as np
import cv2
import math

label_class = { 0: 'STAY',
                1: 'STOP',
                2: 'UP',
                3: 'DOWN' }

# Per-class counters ("gauges") that track how many consecutive frames
# have predicted each class
gages = {}

def initGages(label_class):
    for label in label_class.values():
        gages[label] = 0

# Number of consecutive frames required before a mode is accepted
max_gage = 20

def modeSelector(mode, frame):
    global gages
    for key1 in gages:
        if mode == key1:
            # Increase the gauge of the predicted class and reset all others
            gages[key1] += 1
            for key2 in gages:
                if key2 != mode:
                    gages[key2] = 0
            cv2.putText(frame, key1 + ': ' + str(math.floor((gages[key1] / max_gage) * 100)) + "%",
                        (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 3, cv2.LINE_AA)
    for key in gages:
        if gages[key] >= max_gage:
            # The gauge is full: commit to this mode and start counting again
            gages[key] = 0
            cv2.putText(frame, '[' + str(key) + ']', (200, 200),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3, cv2.LINE_AA)
    return frame

if __name__ == '__main__':
    initGages(label_class)

    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    # Load the model exported from Teachable Machine
    model = load_model('./keras_model.h5')
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

    while cv2.waitKey(33) < 0:
        try:
            ret, frame = capture.read()

            # Resize to the 224x224 input size and convert BGR -> RGB
            image = cv2.resize(frame, (224, 224))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Turn the image into a numpy array and normalize to roughly [-1, 1]
            image_array = np.asarray(image)
            normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1

            # Load the image into the batch array and run the inference
            data[0] = normalized_image_array
            predictions = model.predict(data)
            score = tf.nn.softmax(predictions[0])
            print(label_class[np.argmax(score)])

            # Update the gauges and overlay the current mode on the frame
            frame = modeSelector(label_class[np.argmax(score)], frame)
            cv2.imshow("VideoFrame", frame)
        except Exception as e:
            pass

    capture.release()
    cv2.destroyAllWindows()
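What makes this version more accurate in practice is the gauge (debounce) logic in modeSelector: a mode is committed only after the same class has been predicted for max_gage (20) consecutive frames, and any different prediction resets every counter, so single-frame misclassifications never trigger a mode change. If the committed mode should also drive an action, the same logic can report the label through a callback; a minimal sketch, where handle_mode is a hypothetical function and not part of the original script:

def modeSelectorWithCallback(mode, frame, handle_mode):
    # Gauge logic from modeSelector reduced to its essentials;
    # calls handle_mode(label) once a gauge fills up.
    global gages
    if mode in gages:
        gages[mode] += 1
        for key in gages:
            if key != mode:
                gages[key] = 0
    for key in gages:
        if gages[key] >= max_gage:
            gages[key] = 0
            handle_mode(key)   # e.g. send an 'UP' / 'DOWN' command here
    return frame

# Usage inside the main loop:
# frame = modeSelectorWithCallback(label_class[np.argmax(score)], frame,
#                                  lambda m: print('selected:', m))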