Résultat :
Résultat :
Résultat :
Résultat :
Résultat :
00
Résultat :
import os

import tensorflow as tf
from tensorflow.keras import Model, layers
from tensorflow.keras.callbacks import (EarlyStopping, ModelCheckpoint,
                                        ReduceLROnPlateau)
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential, load_model, save_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from keras.layers import BatchNormalization
from keras.optimizers import Adam
# Augmenting training-data generator.
# NOTE(review): this generator is overwritten by a rescale-only one in a
# later cell and appears never to be used — confirm before keeping it.
train_datagen = ImageDataGenerator(rescale = 1./255,
rotation_range=25,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# Hyperparameters referenced by the cells below.
batch_size = 64  # images per batch (later overridden to 32)
target_size = (64, 64)  # generator output size (later overridden to (150, 150))
input_shape=(64, 64, 3)  # input shape handed to VGG16
seed=1337  # RNG seed for the generators (later overridden to 42)
adam = 0.001  # learning rate passed to the Adam optimizer
fre= -20  # slice index used when "freezing" base-model layers
FC = 2048  # width of each fully connected head layer
E = 1  # number of output units (single sigmoid => binary classification)
patience = 3  # epochs without improvement before callbacks react
verbose = 1  # callback verbosity
factor = 0.50  # LR reduction factor for ReduceLROnPlateau
min_lr = 0.0001  # LR floor for ReduceLROnPlateau
steps_per_epoch=256  # later recomputed as len(train_generator)
validation_steps=256  # later recomputed as len(validation_generator)
epochs=8  # number of training epochs
# --- Upload a .rar dataset archive in Colab and extract Train/Validation ---
from google.colab import files
from keras.preprocessing.image import ImageDataGenerator
import rarfile
import os

# Ask the user to upload the dataset archive (.rar).
print("Veuillez sélectionner le fichier .rar à uploader.")
uploaded = files.upload()

# Guard: nothing was uploaded.
if not uploaded:
    print("Aucun fichier n'a été sélectionné. Veuillez réessayer.")
else:
    # Name of the first (and only expected) uploaded file.
    rar_file_name = list(uploaded.keys())[0]
    # Only .rar archives are supported by this cell.
    if not rar_file_name.endswith('.rar'):
        # BUG FIX: this message was hard-wrapped mid-string-literal in the
        # pasted source, which is a syntax error; rejoined into one string.
        print("Le fichier n'est pas au format rar. Veuillez compresser "
              "le fichier en tant que fichier rar et réessayer.")
    else:
        # Destination directory for the extracted dataset.
        extract_path = '/content/extracted/'
        os.makedirs(extract_path, exist_ok=True)
        # Extract only the "Train" and "Validation" folders of the archive.
        with rarfile.RarFile(rar_file_name) as rar_ref:
            rar_ref.extractall(extract_path, members=[
                member for member in rar_ref.infolist()
                if 'Dataset/Train' in member.filename
                or 'Dataset/Validation' in member.filename
            ])
# --- Image-generator parameters (override the earlier 64x64 settings) ---
batch_size = 32
seed = 42
target_size = (150, 150)

# Rescale-only generators (no augmentation).
train_datagen = ImageDataGenerator(rescale=1.0/255)
test_datagen = ImageDataGenerator(rescale=1.0/255)

# BUG FIX: both flow_from_directory calls were hard-wrapped so that the
# assignment target sat alone on its own line (a syntax error); restored.
# NOTE(review): 'archivevvv/Dataset/...' assumes the archive's internal
# folder layout — confirm against the uploaded .rar.
train_generator = train_datagen.flow_from_directory(
    os.path.join(extract_path, 'archivevvv/Dataset/Train'),
    batch_size=batch_size,
    class_mode='binary',
    seed=seed,
    target_size=target_size)
validation_generator = test_datagen.flow_from_directory(
    os.path.join(extract_path, 'archivevvv/Dataset/Validation'),
    batch_size=batch_size,
    class_mode='binary',
    seed=seed,
    target_size=target_size)
# --- Transfer-learning model: VGG16 backbone + custom conv/dense head ---
# BUG FIX: the original assignment was hard-wrapped mid-identifier
# ("we / ights"), a syntax error; restored to a single valid call.
# NOTE(review): input_shape is (64, 64, 3) while the generators above emit
# (150, 150) images; this model is replaced by a second one further down
# and never trained — confirm whether it is still needed.
base_model = tf.keras.applications.VGG16(input_shape=input_shape,
                                         include_top=False,
                                         weights="imagenet")

# Intended to freeze all but the last 20 layers (fre == -20).
# NOTE(review): headless VGG16 has only 19 layers, so layers[:-20] is empty
# and nothing is actually frozen — verify the intent.
for layer in base_model.layers[:fre]:
    layer.trainable = False

# Build the sequential model: backbone, conv head, then dense classifier.
model = Sequential()
model.add(base_model)
model.add(layers.Dropout(.2))
model.add(Conv2D(512, (3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(layers.Dropout(.1))
model.add(Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(layers.Dropout(.1))
model.add(Conv2D(384, (3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(layers.Dropout(.1))
model.add(Conv2D(384, (3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(layers.Dropout(.1))
model.add(Conv2D(500, (3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(2, strides=(2, 2), padding='same'))
# Fully connected classification head.
model.add(Flatten())
model.add(Dense(FC, activation='relu'))
model.add(layers.Dropout(.2))
model.add(Dense(FC, activation='relu'))
model.add(layers.Dropout(.2))
model.add(Dense(FC, activation='relu'))
model.add(layers.Dropout(.2))
model.add(Dense(E, activation='sigmoid'))  # single-unit binary output
model.summary()
Résultat :
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
vgg16 (Functional) (None, 2, 2, 512) 14714688
dropout (Dropout) (None, 2, 2, 512) 0
conv2d_2 (Conv2D) (None, 2, 2, 512) 2359808
batch_normalization (Batch (None, 2, 2, 512) 2048
Normalization)
dropout_1 (Dropout) (None, 2, 2, 512) 0
conv2d_3 (Conv2D) (None, 2, 2, 128) 589952
batch_normalization_1 (Bat (None, 2, 2, 128) 512
chNormalization)
dropout_2 (Dropout) (None, 2, 2, 128) 0
conv2d_4 (Conv2D) (None, 2, 2, 384) 442752
batch_normalization_2 (Bat (None, 2, 2, 384) 1536
chNormalization)
dropout_3 (Dropout) (None, 2, 2, 384) 0
conv2d_5 (Conv2D) (None, 2, 2, 384) 1327488
batch_normalization_3 (Bat (None, 2, 2, 384) 1536
chNormalization)
dropout_4 (Dropout) (None, 2, 2, 384) 0
conv2d_6 (Conv2D) (None, 2, 2, 500) 1728500
batch_normalization_4 (Bat (None, 2, 2, 500) 2000
chNormalization)
max_pooling2d_2 (MaxPoolin (None, 1, 1, 500) 0
g2D)
flatten_1 (Flatten) (None, 500) 0
dense_2 (Dense) (None, 2048) 1026048
dropout_5 (Dropout) (None, 2048) 0
dense_3 (Dense) (None, 2048) 4196352
dropout_6 (Dropout) (None, 2048) 0
dense_4 (Dense) (None, 2048) 4196352
dropout_7 (Dropout) (None, 2048) 0
dense_5 (Dense) (None, 1) 2049
=================================================================
Total params: 30591621 (116.70 MB)
Trainable params: 30587805 (116.68 MB)
Non-trainable params: 3816 (14.91 KB)
# --- Compile the transfer-learning model and define its callbacks ---
model.compile(optimizer=Adam(adam),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Render an architecture diagram of the model.
from tensorflow.keras.utils import plot_model
plot_model(model, show_shapes=True, show_layer_names=True, rankdir='TB',
           expand_nested=True)

# Reduce the LR by `factor` (0.5) when val_loss plateaus, down to min_lr.
lrd = ReduceLROnPlateau(monitor='val_loss',
                        patience=patience,
                        verbose=verbose,
                        factor=factor,
                        min_lr=min_lr)
# FIX: keep only the best checkpoint instead of overwriting every epoch,
# consistent with the ModelCheckpoint defined later in this file.
mcp = ModelCheckpoint('model.h5', save_best_only=True)
es = EarlyStopping(verbose=verbose, patience=patience)
# --- Second, simpler CNN (the one actually trained below) ---
# BUG FIX: the callbacks import was hard-wrapped across two lines and a
# comment's continuation became a bare statement — both syntax errors.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam
import numpy as np

# Build the model (adjust the architecture to fit your task).
# NOTE: this rebinds `model`, discarding the VGG16 model built earlier.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))  # single sigmoid unit: binary output
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
# --- Rebuild the data generators and train the CNN ---
# BUG FIX: the original had a doubled assignment
# (`train_generator = train_generator = ...`) and hard-wrapped statements.
train_generator = train_datagen.flow_from_directory(
    os.path.join(extract_path, 'archivevvv/Dataset/Train'),
    batch_size=batch_size,
    class_mode='binary',
    seed=seed,
    target_size=target_size)
validation_generator = test_datagen.flow_from_directory(
    os.path.join(extract_path, 'archivevvv/Dataset/Validation'),
    batch_size=batch_size,
    class_mode='binary',
    seed=seed,
    target_size=target_size)

# Exponentially decaying LR, best-only checkpointing, and early stopping
# that restores the best weights.
lrd = LearningRateScheduler(lambda epoch: 0.001 * np.exp(-epoch / 10.))
mcp = ModelCheckpoint('model.h5', save_best_only=True)
es = EarlyStopping(patience=10, restore_best_weights=True)

# One full pass over each generator per epoch.
steps_per_epoch = len(train_generator)
validation_steps = len(validation_generator)
epochs = 8

# `%time` is an IPython magic and not valid plain Python; removed.
# FIX: Model.fit_generator is deprecated (the captured run even warns about
# it); Model.fit accepts generators directly with the same semantics.
hist = model.fit(train_generator,
                 validation_data=validation_generator,
                 steps_per_epoch=steps_per_epoch,
                 validation_steps=validation_steps,
                 epochs=epochs,
                 callbacks=[lrd, mcp, es])
Résultat :
Found 12864 images belonging to 2 classes.
Found 22598 images belonging to 2 classes.
CPU times: user 2 µs, sys: 2 µs, total: 4 µs
Wall time: 6.68 µs
<ipython-input-13-280b5d4fd35f>:42: UserWarning: `Model.fit_generator` is deprecated
and will be removed in a future version. Please use `Model.fit`, which supports
generators.
hist = model.fit_generator(generator=train_generator,
Epoch 1/8
402/402 [==============================] - ETA: 0s - loss: 0.4001 -
accuracy: 0.8187
/usr/local/lib/python3.10/dist-packages/keras/src/engine/training.py:3103: UserWarning:
You are saving your model as an HDF5 file via `model.save()`. This file format is
considered legacy. We recommend using instead the native Keras format, e.g.
`model.save('my_model.keras')`.
saving_api.save_model(
402/402 [==============================] - 658s 2s/step - loss: 0.4001
- accuracy: 0.8187 - val_loss: 0.2054 - val_accuracy: 0.9186 - lr: 0.0010
Epoch 2/8
402/402 [==============================] - 733s 2s/step - loss: 0.1615
- accuracy: 0.9363 - val_loss: 0.1599 - val_accuracy: 0.9365 - lr: 9.0484e-04
Epoch 3/8
402/402 [==============================] - 710s 2s/step - loss: 0.1112
- accuracy: 0.9564 - val_loss: 0.1626 - val_accuracy: 0.9388 - lr: 8.1873e-04
Epoch 4/8
402/402 [==============================] - 729s 2s/step - loss: 0.0775
- accuracy: 0.9708 - val_loss: 0.1618 - val_accuracy: 0.9407 - lr: 7.4082e-04
Epoch 5/8
402/402 [==============================] - 721s 2s/step - loss: 0.0503
- accuracy: 0.9820 - val_loss: 0.1799 - val_accuracy: 0.9408 - lr: 6.7032e-04
Epoch 6/8
402/402 [==============================] - 722s 2s/step - loss: 0.0314
- accuracy: 0.9893 - val_loss: 0.1876 - val_accuracy: 0.9409 - lr: 6.0653e-04
Epoch 7/8
402/402 [==============================] - 688s 2s/step - loss: 0.0189
- accuracy: 0.9947 - val_loss: 0.2029 - val_accuracy: 0.9446 - lr: 5.4881e-04
Epoch 8/8
402/402 [==============================] - 721s 2s/step - loss: 0.0109
- accuracy: 0.9978 - val_loss: 0.2262 - val_accuracy: 0.9427 - lr: 4.9659e-04
# --- Plot training vs. validation accuracy from the fit history ---
import matplotlib.pyplot as plt

acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
loss = hist.history['loss']          # collected but not plotted below
val_loss = hist.history['val_loss']  # collected but not plotted below
epochs = range(len(acc))

plt.plot(epochs, acc, 'g', label='Training accuracy')
plt.plot(epochs, val_acc, 'y', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
# FIX: the original called plt.figure() here, which only opened an extra
# empty figure window before plt.show().
plt.show()
# --- Upload one image and classify it with the trained binary model ---
from google.colab import files
import numpy as np
from keras.preprocessing import image
import matplotlib.pyplot as plt
import io

# Ask the user for an image file.
print("Veuillez sélectionner le fichier image à uploader.")
uploaded = files.upload()
img_file_name = list(uploaded.keys())[0]

# Load the uploaded bytes as an image and shape it into a batch of one.
imge = image.load_img(io.BytesIO(uploaded[img_file_name]),
                      target_size=target_size)
X = image.img_to_array(imge)
X = np.expand_dims(X, axis=0)

# The model ends in a single sigmoid unit, so predict() returns one
# probability (shape (1, 1)): P(class 1).
probabilities = model.predict(X, batch_size=1)
print("Probabilités pour chaque classe:", probabilities)

# BUG FIX: np.argmax over a single-element sigmoid output always returned
# 0; a binary decision must threshold the probability at 0.5 instead.
predicted_class = int(probabilities[0][0] >= 0.5)
print("Classe prédite:", predicted_class)

# Display the uploaded image.
plt.imshow(imge)
plt.show()

# Report the confidence of the predicted class.
if predicted_class == 0:
    print("C'est un homme.")
    # P(class 0) is the complement of the sigmoid output.
    accuracy = 1 - probabilities[0][0]
else:
    print("C'est une femme.")
    # BUG FIX: probabilities has a single column; [0][1] raised IndexError.
    accuracy = probabilities[0][0]
# --- Load a pretrained ViT age classifier from the Hugging Face Hub ---
# BUG FIX: the checkpoint name string literal was hard-wrapped across two
# lines in the pasted source (a syntax error); rejoined.
from transformers import ViTFeatureExtractor, ViTForImageClassification
import requests
from PIL import Image
from io import BytesIO
from IPython.display import display

# Feature extractor (preprocessing) and classifier share one checkpoint.
# NOTE(review): this rebinds `model`, discarding the trained Keras model —
# confirm the earlier model is no longer needed past this point.
processor = ViTFeatureExtractor.from_pretrained('nateraw/vit-age-classifier')
model = ViTForImageClassification.from_pretrained('nateraw/vit-age-classifier')
# --- Upload an image and display it via OpenCV + PIL ---
from google.colab import files
import cv2
import numpy as np
from io import BytesIO
from PIL import Image

# Ask the user to upload a file and grab its name.
uploaded = files.upload()
file_name = list(uploaded.keys())[0]

# Decode the raw upload bytes with OpenCV (BGR channel order).
# NOTE(review): the name `image` shadows the keras.preprocessing `image`
# module imported earlier in this file — consider renaming.
image = cv2.imdecode(np.frombuffer(uploaded[file_name], np.uint8),
                     cv2.IMREAD_COLOR)

if image is not None:
    # OpenCV loads BGR; convert to RGB before handing to PIL for display.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    img_pil = Image.fromarray(image_rgb)
    display(img_pil)
else:
    print("Impossible de charger l'image.")
# --- Run the ViT age classifier on the uploaded image (best-effort) ---
try:
    # BUG FIX: the ViT processor was given OpenCV's raw BGR array (which
    # may also be None when decoding failed above); convert to an RGB PIL
    # image first. Failures still fall through to the except below.
    if image is None:
        raise ValueError("no image was decoded")
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    inputs = processor(images=Image.fromarray(rgb), return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    # Predicted age bucket = index of the highest logit.
    predicted_class_idx = logits.argmax(-1).item()
    print("Age:", model.config.id2label[predicted_class_idx])
except Exception as e:
    print("An error occurred:", e)