# Model recognition
## Training source code:
```python
import os

import cv2
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten

# Load dataset of images with desired output (facial expressions)
data_dir = 'path/to/dataset'
image_paths = [f for f in os.listdir(data_dir) if f.endswith('.jpg')]

# Create lists to hold our image data and labels
X_train = []
y_train = []

for i, img_path in enumerate(image_paths):
    # Load image from disk using OpenCV
    img = cv2.imread(os.path.join(data_dir, img_path))

    # Resize image to 224x224 pixels
    img = cv2.resize(img, (224, 224))

    # Convert image to grayscale and normalize pixel values to [0, 1]
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_norm = img_gray / 255.0

    # Add image data to list
    X_train.append(np.array(img_norm))

    # Assign label for current facial expression (e.g., happy, sad, angry);
    # labels must be 0..2 for sparse categorical cross-entropy with 3 outputs
    if i % 3 == 0:    # Label for 'happy' expression
        y_train.append(0)
    elif i % 3 == 1:  # Label for 'sad' expression
        y_train.append(1)
    else:             # Label for 'angry' expression
        y_train.append(2)

# Create a simple fully connected TensorFlow model
model = Sequential()
model.add(Flatten(input_shape=(224, 224)))  # Flatten 224x224 grayscale input
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(3, activation='softmax'))  # Output layer for 3 facial expressions

# Compile the model with a suitable loss function and optimizer
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the model using our dataset
model.fit(np.array(X_train), np.array(y_train), epochs=10, batch_size=32)

print("Model trained!")
```
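Once training finishes, the fitted `model` can score a new face image. The snippet below is a minimal sketch of that step, continuing from the script above; the test image path `'path/to/test.jpg'` and the label-to-name mapping are placeholders chosen to match the training labels used here, not part of the original code.

```python
import cv2
import numpy as np

# Hypothetical mapping that mirrors the labels assigned during training
labels = {0: 'happy', 1: 'sad', 2: 'angry'}

# 'path/to/test.jpg' is a placeholder path for illustration
img = cv2.imread('path/to/test.jpg')
img = cv2.resize(img, (224, 224))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255.0

# Add a batch dimension: the model expects input of shape (batch, 224, 224)
probs = model.predict(img[np.newaxis, ...])
print("Predicted expression:", labels[int(np.argmax(probs))])
```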