diff --git a/machine_learning/classification/Adaboost.ipynb b/machine_learning/classification/Adaboost.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..359d8fab1a81d93015b0e85080445c7d7193b887
--- /dev/null
+++ b/machine_learning/classification/Adaboost.ipynb
@@ -0,0 +1,314 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Entrainement d'un modèle avec la méthode des SVM"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import numpy as np\n",
+    "import random\n",
+    "from PIL import Image"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1) Fonctions de Preprocessing des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "AVERAGE_SIZE_IMAGE = (127, 145)  # Thanks to the stats, we know that size of bbox will be (127, 145) -> Average size of labels \n",
+    "\n",
+    "def generate_empty_bbox(image_width, image_height):\n",
+    "    \"\"\" \n",
+    "    Generate an empty box for images without label\n",
+    "    \"\"\"\n",
+    "    # Generating random coords for the bbox\n",
+    "    x_min = random.randint(0, image_width - AVERAGE_SIZE_IMAGE[0])\n",
+    "    y_min = random.randint(0, image_height - AVERAGE_SIZE_IMAGE[1])\n",
+    "    \n",
+    "    # Compute complete coords of the bbox\n",
+    "    x_max = x_min + AVERAGE_SIZE_IMAGE[0]\n",
+    "    y_max = y_min + AVERAGE_SIZE_IMAGE[1]\n",
+    "    \n",
+    "    return (x_min, y_min, x_max, y_max)\n",
+    "\n",
+    "def load_data(image_dir, label_dir):\n",
+    "    \"\"\" \n",
+    "    Create a dict with all the usefull datas of the dataset\n",
+    "    datas = {\n",
+    "        \"XXXX\" (name of the file) : {\n",
+    "            \"img\" : image as an array,\n",
+    "            \"labels\" (data of the labels): {\n",
+    "                \"X\" index of the label (0,1,...,n) : {\n",
+    "                    \"name\" : name of the label,\n",
+    "                    \"coord\" : coord of the label like xmin, ymin, xmax, ymax,\n",
+    "                    \"img\" : crooped img of the label,\n",
+    "                }\n",
+    "            }\n",
+    "        }\n",
+    "    }\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    datas = {}\n",
+    "\n",
+    "    for image_file in os.listdir(image_dir):\n",
+    "        # Computing name and files paths\n",
+    "        image_path = image_dir + '/' + image_file\n",
+    "        name = image_file.split('.')[0]\n",
+    "        label_path = label_dir + '/' + name + '.csv'\n",
+    "        \n",
+    "        # Import image as array\n",
+    "        image = np.array(Image.open(image_path))\n",
+    "\n",
+    "        # Import labels as array \n",
+    "        with open(label_path, 'r') as file:\n",
+    "            rows = file.readlines()\n",
+    "\n",
+    "            label_data = {}\n",
+    "            if rows == ['\\n']:  # Create a random empty label to balance model\n",
+    "                # Create random coords for empty label\n",
+    "                xmin, ymin, xmax, ymax = generate_empty_bbox(image.shape[1], image.shape[0])\n",
+    "    \n",
+    "                # Get the cropped image (as array) of the label\n",
+    "                cropped_image = np.array(Image.fromarray(image[ymin:ymax, xmin:xmax]).resize(AVERAGE_SIZE_IMAGE))\n",
+    "               \n",
+    "                label_data[0] = {\n",
+    "                        \"name\":\"empty\",\n",
+    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
+    "                        \"img\":cropped_image\n",
+    "                    }\n",
+    "            else:\n",
+    "                for i, row in enumerate(rows):  # One image can contain several labels\n",
+    "                    row = row.strip().split(\",\")\n",
+    "\n",
+    "                    # Compute coords of the label\n",
+    "                    xmin, ymin, xmax, ymax = map(int, row[0:4])\n",
+    "\n",
+    "                    # Get the label name\n",
+    "                    class_name = row[4]\n",
+    "\n",
+    "                    # Get the cropped image (as array) of the label\n",
+    "                    cropped_image = np.array(Image.fromarray(image[ymin:ymax, xmin:xmax]).resize(AVERAGE_SIZE_IMAGE))\n",
+    "                    \n",
+    "                    # Adding to the json\n",
+    "                    label_data[i] = {\n",
+    "                        \"name\":class_name,\n",
+    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
+    "                        \"img\":cropped_image\n",
+    "                    }\n",
+    "\n",
+    "        datas[name] = {\n",
+    "             \"img\" : image,\n",
+    "             \"labels\" : label_data,\n",
+    "        }\n",
+    "       \n",
+    "    return datas\n",
+    "\n",
+    "# Dict to convert str class name to int\n",
+    "name_to_int = {\n",
+    "    \"danger\": 0,\n",
+    "    \"interdiction\": 1,\n",
+    "    \"obligation\": 2,\n",
+    "    \"stop\": 3,\n",
+    "    \"ceder\": 4,\n",
+    "    \"frouge\": 5,\n",
+    "    \"forange\": 6,\n",
+    "    \"fvert\": 7,\n",
+    "    \"ff\": 8,\n",
+    "    \"empty\": 9\n",
+    "}"
+   ]
+  },
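+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Minimal usage sketch (illustration only) of the `datas` structure documented in `load_data`, assuming the dataset paths used later in this notebook exist:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustration only: inspect one entry of the structure returned by load_data\n",
+    "example_datas = load_data(\"../../data/train/images\", \"../../data/train/labels\")\n",
+    "example_entry = next(iter(example_datas.values()))  # any image of the dataset\n",
+    "first_label = example_entry[\"labels\"][0]  # first label of this image (real or generated empty)\n",
+    "print(example_entry[\"img\"].shape)  # full image as an array\n",
+    "print(first_label[\"name\"], first_label[\"coord\"])  # class name and bbox coordinates\n",
+    "print(first_label[\"img\"].shape)  # cropped label image, resized to AVERAGE_SIZE_IMAGE"
+   ]
+  },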
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 2) Fonction de création des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def create_xy(datas):\n",
+    "    # Creating arrays with all labels datas & classes\n",
+    "    X = []\n",
+    "    Y = []\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            image_as_array = np.array(row[\"img\"]).flatten()\n",
+    "            X.append(image_as_array)\n",
+    "            Y.append(name_to_int[row[\"name\"]])\n",
+    "\n",
+    "    X = np.array(X)\n",
+    "    Y = np.array(Y)\n",
+    "\n",
+    "    return X, Y"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 3) Création des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Training dataset\n",
+    "datas_train = load_data(\"../../data/train/images\", \"../../data/train/labels\")\n",
+    "X_train, Y_train = create_xy(datas=datas_train)\n",
+    "\n",
+    "# Validation dataset\n",
+    "datas_val = load_data(\"../../data/val/images\", \"../../data/val/labels\")\n",
+    "X_val, Y_val = create_xy(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 4) Application de la méthode Adaboost"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "c:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\ensemble\\_weight_boosting.py:519: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.\n",
+      "  warnings.warn(\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Taux d'erreur : 0.6302521008403361\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.ensemble import AdaBoostClassifier\n",
+    "\n",
+    "adaboost_clf = AdaBoostClassifier(n_estimators=10) # To change\n",
+    "adaboost_clf.fit(X_train, Y_train)\n",
+    "y = adaboost_clf.predict(X_val)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y != Y_val)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 5) Test de la méthode Adaboost avec application des caractéristiques HOG"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from skimage.feature import hog\n",
+    "from skimage.color import rgb2gray\n",
+    "\n",
+    "def extract_hog(datas):\n",
+    "    # Creating X array with all HOG information of images\n",
+    "    X = []\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            image_as_array = np.array(hog(rgb2gray(row[\"img\"]))).flatten()\n",
+    "            X.append(image_as_array)\n",
+    "\n",
+    "    return np.array(X)\n",
+    "\n",
+    "\n",
+    "# Update training dataset\n",
+    "X_train_HOG = extract_hog(datas=datas_train)\n",
+    "\n",
+    "# Update validation dataset\n",
+    "X_val_HOG = extract_hog(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "c:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\ensemble\\_weight_boosting.py:519: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.\n",
+      "  warnings.warn(\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Taux d'erreur : 0.5378151260504201\n"
+     ]
+    }
+   ],
+   "source": [
+    "adaboost_clf = AdaBoostClassifier(n_estimators=10)\n",
+    "adaboost_clf.fit(X_train_HOG, Y_train)\n",
+    "y_HOG = adaboost_clf.predict(X_val_HOG)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y_HOG != Y_val)}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/machine_learning/classification/RandomForest.ipynb b/machine_learning/classification/RandomForest.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..9e9be72cd7f076603c260f7eacdbd5e51e7f6f22
--- /dev/null
+++ b/machine_learning/classification/RandomForest.ipynb
@@ -0,0 +1,348 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Entrainement d'un modèle avec la méthode RandomTree"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import numpy as np\n",
+    "import random\n",
+    "from PIL import Image"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1) Fonctions de Preprocessing des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "AVERAGE_SIZE_IMAGE = (127, 145)  # Thanks to the stats, we know that size of bbox will be (127, 145) -> Average size of labels \n",
+    "\n",
+    "def generate_empty_bbox(image_width, image_height):\n",
+    "    \"\"\" \n",
+    "    Generate an empty box for images without label\n",
+    "    \"\"\"\n",
+    "    # Generating random coords for the bbox\n",
+    "    x_min = random.randint(0, image_width - AVERAGE_SIZE_IMAGE[0])\n",
+    "    y_min = random.randint(0, image_height - AVERAGE_SIZE_IMAGE[1])\n",
+    "    \n",
+    "    # Compute complete coords of the bbox\n",
+    "    x_max = x_min + AVERAGE_SIZE_IMAGE[0]\n",
+    "    y_max = y_min + AVERAGE_SIZE_IMAGE[1]\n",
+    "    \n",
+    "    return (x_min, y_min, x_max, y_max)\n",
+    "\n",
+    "def load_data(image_dir, label_dir):\n",
+    "    \"\"\" \n",
+    "    Create a dict with all the usefull datas of the dataset\n",
+    "    datas = {\n",
+    "        \"XXXX\" (name of the file) : {\n",
+    "            \"img\" : image as an array,\n",
+    "            \"labels\" (data of the labels): {\n",
+    "                \"X\" index of the label (0,1,...,n) : {\n",
+    "                    \"name\" : name of the label,\n",
+    "                    \"coord\" : coord of the label like xmin, ymin, xmax, ymax,\n",
+    "                    \"img\" : crooped img of the label,\n",
+    "                }\n",
+    "            }\n",
+    "        }\n",
+    "    }\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    datas = {}\n",
+    "\n",
+    "    for image_file in os.listdir(image_dir):\n",
+    "        # Computing name and files paths\n",
+    "        image_path = image_dir + '/' + image_file\n",
+    "        name = image_file.split('.')[0]\n",
+    "        label_path = label_dir + '/' + name + '.csv'\n",
+    "        \n",
+    "        # Import image as array\n",
+    "        image = np.array(Image.open(image_path))\n",
+    "\n",
+    "        # Import labels as array \n",
+    "        with open(label_path, 'r') as file:\n",
+    "            rows = file.readlines()\n",
+    "\n",
+    "            label_data = {}\n",
+    "            if rows == ['\\n']:  # Create a random empty label to balance model\n",
+    "                # Create random coords for empty label\n",
+    "                xmin, ymin, xmax, ymax = generate_empty_bbox(image.shape[1], image.shape[0])\n",
+    "    \n",
+    "                # Get the cropped image (as array) of the label\n",
+    "                cropped_image = np.array(Image.fromarray(image[ymin:ymax, xmin:xmax]).resize(AVERAGE_SIZE_IMAGE))\n",
+    "               \n",
+    "                label_data[0] = {\n",
+    "                        \"name\":\"empty\",\n",
+    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
+    "                        \"img\":cropped_image\n",
+    "                    }\n",
+    "            else:\n",
+    "                for i, row in enumerate(rows):  # One image can contain several labels\n",
+    "                    row = row.strip().split(\",\")\n",
+    "\n",
+    "                    # Compute coords of the label\n",
+    "                    xmin, ymin, xmax, ymax = map(int, row[0:4])\n",
+    "\n",
+    "                    # Get the label name\n",
+    "                    class_name = row[4]\n",
+    "\n",
+    "                    # Get the cropped image (as array) of the label\n",
+    "                    cropped_image = np.array(Image.fromarray(image[ymin:ymax, xmin:xmax]).resize(AVERAGE_SIZE_IMAGE))\n",
+    "                    \n",
+    "                    # Adding to the json\n",
+    "                    label_data[i] = {\n",
+    "                        \"name\":class_name,\n",
+    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
+    "                        \"img\":cropped_image\n",
+    "                    }\n",
+    "\n",
+    "        datas[name] = {\n",
+    "             \"img\" : image,\n",
+    "             \"labels\" : label_data,\n",
+    "        }\n",
+    "       \n",
+    "    return datas\n",
+    "\n",
+    "# Dict to convert str class name to int\n",
+    "name_to_int = {\n",
+    "    \"danger\": 0,\n",
+    "    \"interdiction\": 1,\n",
+    "    \"obligation\": 2,\n",
+    "    \"stop\": 3,\n",
+    "    \"ceder\": 4,\n",
+    "    \"frouge\": 5,\n",
+    "    \"forange\": 6,\n",
+    "    \"fvert\": 7,\n",
+    "    \"ff\": 8,\n",
+    "    \"empty\": 9\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 2) Fonction de création des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def create_xy(datas):\n",
+    "    # Creating arrays with all labels datas & classes\n",
+    "    X = []\n",
+    "    Y = []\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            image_as_array = np.array(row[\"img\"]).flatten()\n",
+    "            X.append(image_as_array)\n",
+    "            Y.append(name_to_int[row[\"name\"]])\n",
+    "\n",
+    "    X = np.array(X)\n",
+    "    Y = np.array(Y)\n",
+    "\n",
+    "    return X, Y"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 3) Création des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Training dataset\n",
+    "datas_train = load_data(\"../../data/train/images\", \"../../data/train/labels\")\n",
+    "X_train, Y_train = create_xy(datas=datas_train)\n",
+    "\n",
+    "# Validation dataset\n",
+    "datas_val = load_data(\"../../data/val/images\", \"../../data/val/labels\")\n",
+    "X_val, Y_val = create_xy(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 4) Application de la méthode RandomTree"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Taux d'erreur : 0.20168067226890757\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.ensemble import RandomForestClassifier\n",
+    "\n",
+    "adaboost_clf = RandomForestClassifier(n_estimators=50) # To change\n",
+    "adaboost_clf.fit(X_train, Y_train)\n",
+    "y = adaboost_clf.predict(X_val)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y != Y_val)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 5) Test de la méthode RandomTree avec application des caractéristiques HOG"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from skimage.feature import hog\n",
+    "from skimage.color import rgb2gray\n",
+    "\n",
+    "def extract_hog(datas):\n",
+    "    # Creating X array with all HOG information of images\n",
+    "    X = []\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            image_as_array = np.array(hog(rgb2gray(row[\"img\"]))).flatten()\n",
+    "            X.append(image_as_array)\n",
+    "\n",
+    "    return np.array(X)\n",
+    "\n",
+    "\n",
+    "# Update training dataset\n",
+    "X_train_HOG = extract_hog(datas=datas_train)\n",
+    "\n",
+    "# Update validation dataset\n",
+    "X_val_HOG = extract_hog(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Taux d'erreur : 0.2689075630252101\n"
+     ]
+    }
+   ],
+   "source": [
+    "adaboost_clf = RandomForestClassifier(n_estimators=10)\n",
+    "adaboost_clf.fit(X_train_HOG, Y_train)\n",
+    "y_HOG = adaboost_clf.predict(X_val_HOG)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y_HOG != Y_val)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 6) Détermination du meilleur paramètre"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [
+    {
+     "ename": "KeyboardInterrupt",
+     "evalue": "",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
+      "Cell \u001b[1;32mIn[33], line 7\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m100\u001b[39m, \u001b[38;5;241m10\u001b[39m):\n\u001b[0;32m      6\u001b[0m     adaboost_clf \u001b[38;5;241m=\u001b[39m RandomForestClassifier(n_estimators\u001b[38;5;241m=\u001b[39mi) \u001b[38;5;66;03m# To change\u001b[39;00m\n\u001b[1;32m----> 7\u001b[0m     \u001b[43madaboost_clf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mY_train\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m      8\u001b[0m     y \u001b[38;5;241m=\u001b[39m adaboost_clf\u001b[38;5;241m.\u001b[39mpredict(X_val)\n\u001b[0;32m      9\u001b[0m     tab\u001b[38;5;241m.\u001b[39mappend(np\u001b[38;5;241m.\u001b[39mmean(y \u001b[38;5;241m!=\u001b[39m Y_val))\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\base.py:1474\u001b[0m, in \u001b[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001b[1;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1467\u001b[0m     estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[0;32m   1469\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m   1470\u001b[0m     skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m   1471\u001b[0m         prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m   1472\u001b[0m     )\n\u001b[0;32m   1473\u001b[0m ):\n\u001b[1;32m-> 1474\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfit_method\u001b[49m\u001b[43m(\u001b[49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\ensemble\\_forest.py:489\u001b[0m, in \u001b[0;36mBaseForest.fit\u001b[1;34m(self, X, y, sample_weight)\u001b[0m\n\u001b[0;32m    478\u001b[0m trees \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m    479\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_make_estimator(append\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m, random_state\u001b[38;5;241m=\u001b[39mrandom_state)\n\u001b[0;32m    480\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(n_more_estimators)\n\u001b[0;32m    481\u001b[0m ]\n\u001b[0;32m    483\u001b[0m \u001b[38;5;66;03m# Parallel loop: we prefer the threading backend as the Cython code\u001b[39;00m\n\u001b[0;32m    484\u001b[0m \u001b[38;5;66;03m# for fitting the trees is internally releasing the Python GIL\u001b[39;00m\n\u001b[0;32m    485\u001b[0m \u001b[38;5;66;03m# making threading more efficient than multiprocessing in\u001b[39;00m\n\u001b[0;32m    486\u001b[0m \u001b[38;5;66;03m# that case. However, for joblib 0.12+ we respect any\u001b[39;00m\n\u001b[0;32m    487\u001b[0m \u001b[38;5;66;03m# parallel_backend contexts set at a higher level,\u001b[39;00m\n\u001b[0;32m    488\u001b[0m \u001b[38;5;66;03m# since correctness does not rely on using threads.\u001b[39;00m\n\u001b[1;32m--> 489\u001b[0m trees \u001b[38;5;241m=\u001b[39m \u001b[43mParallel\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    490\u001b[0m \u001b[43m    \u001b[49m\u001b[43mn_jobs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mn_jobs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    491\u001b[0m \u001b[43m    \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    492\u001b[0m \u001b[43m    \u001b[49m\u001b[43mprefer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mthreads\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[0;32m    493\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    494\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdelayed\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_parallel_build_trees\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    495\u001b[0m \u001b[43m        \u001b[49m\u001b[43mt\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    496\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbootstrap\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    497\u001b[0m \u001b[43m        \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    498\u001b[0m \u001b[43m        \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    499\u001b[0m \u001b[43m        \u001b[49m\u001b[43msample_weight\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    500\u001b[0m \u001b[43m        \u001b[49m\u001b[43mi\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    501\u001b[0m \u001b[43m        \u001b[49m\u001b[38;5;28;43mlen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtrees\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    502\u001b[0m \u001b[43m        
\u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    503\u001b[0m \u001b[43m        \u001b[49m\u001b[43mclass_weight\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mclass_weight\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    504\u001b[0m \u001b[43m        \u001b[49m\u001b[43mn_samples_bootstrap\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mn_samples_bootstrap\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    505\u001b[0m \u001b[43m        \u001b[49m\u001b[43mmissing_values_in_feature_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmissing_values_in_feature_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    506\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    507\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mi\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mt\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43menumerate\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtrees\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    508\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    510\u001b[0m \u001b[38;5;66;03m# Collect newly grown trees\u001b[39;00m\n\u001b[0;32m    511\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mestimators_\u001b[38;5;241m.\u001b[39mextend(trees)\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\utils\\parallel.py:67\u001b[0m, in \u001b[0;36mParallel.__call__\u001b[1;34m(self, iterable)\u001b[0m\n\u001b[0;32m     62\u001b[0m config \u001b[38;5;241m=\u001b[39m get_config()\n\u001b[0;32m     63\u001b[0m iterable_with_config \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m     64\u001b[0m     (_with_config(delayed_func, config), args, kwargs)\n\u001b[0;32m     65\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m delayed_func, args, kwargs \u001b[38;5;129;01min\u001b[39;00m iterable\n\u001b[0;32m     66\u001b[0m )\n\u001b[1;32m---> 67\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__call__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43miterable_with_config\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\joblib\\parallel.py:1863\u001b[0m, in \u001b[0;36mParallel.__call__\u001b[1;34m(self, iterable)\u001b[0m\n\u001b[0;32m   1861\u001b[0m     output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_sequential_output(iterable)\n\u001b[0;32m   1862\u001b[0m     \u001b[38;5;28mnext\u001b[39m(output)\n\u001b[1;32m-> 1863\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m output \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mreturn_generator \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;43mlist\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43moutput\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1865\u001b[0m \u001b[38;5;66;03m# Let's create an ID that uniquely identifies the current call. If the\u001b[39;00m\n\u001b[0;32m   1866\u001b[0m \u001b[38;5;66;03m# call is interrupted early and that the same instance is immediately\u001b[39;00m\n\u001b[0;32m   1867\u001b[0m \u001b[38;5;66;03m# re-used, this id will be used to prevent workers that were\u001b[39;00m\n\u001b[0;32m   1868\u001b[0m \u001b[38;5;66;03m# concurrently finalizing a task from the previous call to run the\u001b[39;00m\n\u001b[0;32m   1869\u001b[0m \u001b[38;5;66;03m# callback.\u001b[39;00m\n\u001b[0;32m   1870\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lock:\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\joblib\\parallel.py:1792\u001b[0m, in \u001b[0;36mParallel._get_sequential_output\u001b[1;34m(self, iterable)\u001b[0m\n\u001b[0;32m   1790\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_dispatched_batches \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m   1791\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_dispatched_tasks \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m-> 1792\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1793\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_completed_tasks \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m   1794\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mprint_progress()\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\utils\\parallel.py:129\u001b[0m, in \u001b[0;36m_FuncWrapper.__call__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m    127\u001b[0m     config \u001b[38;5;241m=\u001b[39m {}\n\u001b[0;32m    128\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mconfig):\n\u001b[1;32m--> 129\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunction\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\ensemble\\_forest.py:192\u001b[0m, in \u001b[0;36m_parallel_build_trees\u001b[1;34m(tree, bootstrap, X, y, sample_weight, tree_idx, n_trees, verbose, class_weight, n_samples_bootstrap, missing_values_in_feature_mask)\u001b[0m\n\u001b[0;32m    189\u001b[0m     \u001b[38;5;28;01melif\u001b[39;00m class_weight \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbalanced_subsample\u001b[39m\u001b[38;5;124m\"\u001b[39m:\n\u001b[0;32m    190\u001b[0m         curr_sample_weight \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m=\u001b[39m compute_sample_weight(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbalanced\u001b[39m\u001b[38;5;124m\"\u001b[39m, y, indices\u001b[38;5;241m=\u001b[39mindices)\n\u001b[1;32m--> 192\u001b[0m     \u001b[43mtree\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_fit\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    193\u001b[0m \u001b[43m        \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    194\u001b[0m \u001b[43m        \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    195\u001b[0m \u001b[43m        \u001b[49m\u001b[43msample_weight\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcurr_sample_weight\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    196\u001b[0m \u001b[43m        \u001b[49m\u001b[43mcheck_input\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    197\u001b[0m \u001b[43m        \u001b[49m\u001b[43mmissing_values_in_feature_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmissing_values_in_feature_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    198\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    199\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    200\u001b[0m     tree\u001b[38;5;241m.\u001b[39m_fit(\n\u001b[0;32m    201\u001b[0m         X,\n\u001b[0;32m    202\u001b[0m         y,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    205\u001b[0m         missing_values_in_feature_mask\u001b[38;5;241m=\u001b[39mmissing_values_in_feature_mask,\n\u001b[0;32m    206\u001b[0m     )\n",
+      "File \u001b[1;32mc:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\sklearn\\tree\\_classes.py:472\u001b[0m, in \u001b[0;36mBaseDecisionTree._fit\u001b[1;34m(self, X, y, sample_weight, check_input, missing_values_in_feature_mask)\u001b[0m\n\u001b[0;32m    461\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    462\u001b[0m     builder \u001b[38;5;241m=\u001b[39m BestFirstTreeBuilder(\n\u001b[0;32m    463\u001b[0m         splitter,\n\u001b[0;32m    464\u001b[0m         min_samples_split,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    469\u001b[0m         \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmin_impurity_decrease,\n\u001b[0;32m    470\u001b[0m     )\n\u001b[1;32m--> 472\u001b[0m \u001b[43mbuilder\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbuild\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtree_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mX\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msample_weight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmissing_values_in_feature_mask\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    474\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_outputs_ \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m is_classifier(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    475\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_classes_ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mn_classes_[\u001b[38;5;241m0\u001b[39m]\n",
+      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
+     ]
+    }
+   ],
+   "source": [
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "tab = []\n",
+    "\n",
+    "for i in range(1, 100, 10):\n",
+    "    adaboost_clf = RandomForestClassifier(n_estimators=i) # To change\n",
+    "    adaboost_clf.fit(X_train, Y_train)\n",
+    "    y = adaboost_clf.predict(X_val)\n",
+    "    tab.append(np.mean(y != Y_val))\n",
+    "\n",
+    "plt.bar(range(len(tab)), tab, color='skyblue')\n",
+    "plt.show()\n",
+    "\n",
+    "print(tab)\n",
+    "print(f\"Taux d'erreur : {np.mean(y != Y_val)}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/machine_learning/classification/SVM.ipynb b/machine_learning/classification/SVM.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..26ee1cc09bf7e8d99ea6665d97e6e3796f3a97b3
--- /dev/null
+++ b/machine_learning/classification/SVM.ipynb
@@ -0,0 +1,369 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Entrainement d'un modèle avec la méthode des SVM"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import numpy as np\n",
+    "import random\n",
+    "from PIL import Image"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1) Fonctions de Preprocessing des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "AVERAGE_SIZE_IMAGE = (127, 145)  # Thanks to the stats, we know that size of bbox will be (127, 145) -> Average size of labels \n",
+    "\n",
+    "def generate_empty_bbox(image_width, image_height):\n",
+    "    \"\"\" \n",
+    "    Generate an empty box for images without label\n",
+    "    \"\"\"\n",
+    "    # Generating random coords for the bbox\n",
+    "    x_min = random.randint(0, image_width - AVERAGE_SIZE_IMAGE[0])\n",
+    "    y_min = random.randint(0, image_height - AVERAGE_SIZE_IMAGE[1])\n",
+    "    \n",
+    "    # Compute complete coords of the bbox\n",
+    "    x_max = x_min + AVERAGE_SIZE_IMAGE[0]\n",
+    "    y_max = y_min + AVERAGE_SIZE_IMAGE[1]\n",
+    "    \n",
+    "    return (x_min, y_min, x_max, y_max)\n",
+    "\n",
+    "def load_data(image_dir, label_dir):\n",
+    "    \"\"\" \n",
+    "    Create a dict with all the usefull datas of the dataset\n",
+    "    datas = {\n",
+    "        \"XXXX\" (name of the file) : {\n",
+    "            \"img\" : image as an array,\n",
+    "            \"labels\" (data of the labels): {\n",
+    "                \"X\" index of the label (0,1,...,n) : {\n",
+    "                    \"name\" : name of the label,\n",
+    "                    \"coord\" : coord of the label like xmin, ymin, xmax, ymax,\n",
+    "                    \"img\" : crooped img of the label,\n",
+    "                }\n",
+    "            }\n",
+    "        }\n",
+    "    }\n",
+    "    \"\"\"\n",
+    "    \n",
+    "    datas = {}\n",
+    "\n",
+    "    for image_file in os.listdir(image_dir):\n",
+    "        # Computing name and files paths\n",
+    "        image_path = image_dir + '/' + image_file\n",
+    "        name = image_file.split('.')[0]\n",
+    "        label_path = label_dir + '/' + name + '.csv'\n",
+    "        \n",
+    "        # Import image as array\n",
+    "        image = np.array(Image.open(image_path))\n",
+    "\n",
+    "        # Import labels as array \n",
+    "        with open(label_path, 'r') as file:\n",
+    "            rows = file.readlines()\n",
+    "\n",
+    "            label_data = {}\n",
+    "            if rows == ['\\n']:  # Create a random empty label to balance model\n",
+    "                # Create random coords for empty label\n",
+    "                xmin, ymin, xmax, ymax = generate_empty_bbox(image.shape[1], image.shape[0])\n",
+    "    \n",
+    "                # Get the cropped image (as array) of the label\n",
+    "                cropped_image = np.array(Image.fromarray(image[ymin:ymax, xmin:xmax]).resize(AVERAGE_SIZE_IMAGE))\n",
+    "               \n",
+    "                label_data[0] = {\n",
+    "                        \"name\":\"empty\",\n",
+    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
+    "                        \"img\":cropped_image\n",
+    "                    }\n",
+    "            else:\n",
+    "                for i, row in enumerate(rows):  # One image can contain several labels\n",
+    "                    row = row.strip().split(\",\")\n",
+    "\n",
+    "                    # Compute coords of the label\n",
+    "                    xmin, ymin, xmax, ymax = map(int, row[0:4])\n",
+    "\n",
+    "                    # Get the label name\n",
+    "                    class_name = row[4]\n",
+    "\n",
+    "                    # Get the cropped image (as array) of the label\n",
+    "                    cropped_image = np.array(Image.fromarray(image[ymin:ymax, xmin:xmax]).resize(AVERAGE_SIZE_IMAGE))\n",
+    "                    \n",
+    "                    # Adding to the json\n",
+    "                    label_data[i] = {\n",
+    "                        \"name\":class_name,\n",
+    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
+    "                        \"img\":cropped_image\n",
+    "                    }\n",
+    "\n",
+    "        datas[name] = {\n",
+    "             \"img\" : image,\n",
+    "             \"labels\" : label_data,\n",
+    "        }\n",
+    "       \n",
+    "    return datas\n",
+    "\n",
+    "# Dict to convert str class name to int\n",
+    "name_to_int = {\n",
+    "    \"danger\": 0,\n",
+    "    \"interdiction\": 1,\n",
+    "    \"obligation\": 2,\n",
+    "    \"stop\": 3,\n",
+    "    \"ceder\": 4,\n",
+    "    \"frouge\": 5,\n",
+    "    \"forange\": 6,\n",
+    "    \"fvert\": 7,\n",
+    "    \"ff\": 8,\n",
+    "    \"empty\": 9\n",
+    "}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 2) Fonction de création des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def create_xy(datas):\n",
+    "    # Creating arrays with all labels datas & classes\n",
+    "    X = []\n",
+    "    Y = []\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            image_as_array = np.array(row[\"img\"]).flatten()\n",
+    "            X.append(image_as_array)\n",
+    "            Y.append(name_to_int[row[\"name\"]])\n",
+    "\n",
+    "    X = np.array(X)\n",
+    "    Y = np.array(Y)\n",
+    "\n",
+    "    return X, Y"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 3) Création des datasets"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Training dataset\n",
+    "datas_train = load_data(\"../../data/train/images\", \"../../data/train/labels\")\n",
+    "X_train, Y_train = create_xy(datas=datas_train)\n",
+    "\n",
+    "# Validation dataset\n",
+    "datas_val = load_data(\"../../data/val/images\", \"../../data/val/labels\")\n",
+    "X_val, Y_val = create_xy(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 4) Application de la méthode des SVM"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Taux d'erreur : 0.226890756302521\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn import svm\n",
+    "\n",
+    "svm_model = svm.SVC(kernel='linear') \n",
+    "svm_model.fit(X_train, Y_train)\n",
+    "y = svm_model.predict(X_val)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y != Y_val)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 5) Test de la méthode SVM avec application des caractéristiques HOG"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from skimage.feature import hog\n",
+    "from skimage.color import rgb2gray\n",
+    "\n",
+    "def extract_hog(datas):\n",
+    "    # Creating X array with all HOG information of images\n",
+    "    X = []\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            image_as_array = np.array(hog(rgb2gray(row[\"img\"]))).flatten()\n",
+    "            X.append(image_as_array)\n",
+    "\n",
+    "    return np.array(X)\n",
+    "\n",
+    "\n",
+    "# Update training dataset\n",
+    "X_train_HOG = extract_hog(datas=datas_train)\n",
+    "\n",
+    "# Update validation dataset\n",
+    "X_val_HOG = extract_hog(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Taux d'erreur : 0.15966386554621848\n"
+     ]
+    }
+   ],
+   "source": [
+    "svm_model = svm.SVC(kernel='linear') \n",
+    "svm_model.fit(X_train_HOG, Y_train)\n",
+    "y_HOG = svm_model.predict(X_val_HOG)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y_HOG != Y_val)}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 6) Test de la méthode SVM avec application des LPB"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "c:\\Users\\victo\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\skimage\\feature\\texture.py:360: UserWarning: Applying `local_binary_pattern` to floating-point images may give unexpected results when small numerical differences between adjacent pixels are present. It is recommended to use this function with images of integer dtype.\n",
+      "  warnings.warn(\n"
+     ]
+    },
+    {
+     "ename": "ValueError",
+     "evalue": "setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (1071,) + inhomogeneous part.",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "Cell \u001b[1;32mIn[34], line 17\u001b[0m\n\u001b[0;32m     13\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m np\u001b[38;5;241m.\u001b[39marray(X)\n\u001b[0;32m     16\u001b[0m \u001b[38;5;66;03m# Update training dataset\u001b[39;00m\n\u001b[1;32m---> 17\u001b[0m X_train_LBP \u001b[38;5;241m=\u001b[39m \u001b[43mextract_LBP\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdatas\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdatas_train\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     19\u001b[0m \u001b[38;5;66;03m# Update validation dataset\u001b[39;00m\n\u001b[0;32m     20\u001b[0m X_val_LBP \u001b[38;5;241m=\u001b[39m extract_LBP(datas\u001b[38;5;241m=\u001b[39mdatas_val)\n",
+      "Cell \u001b[1;32mIn[34], line 13\u001b[0m, in \u001b[0;36mextract_LBP\u001b[1;34m(datas)\u001b[0m\n\u001b[0;32m     10\u001b[0m         image_as_array \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray(hog(local_binary_pattern(rgb2gray(data[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimg\u001b[39m\u001b[38;5;124m\"\u001b[39m]), P \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m8\u001b[39m, R \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m)))\u001b[38;5;241m.\u001b[39mflatten()\n\u001b[0;32m     11\u001b[0m         X\u001b[38;5;241m.\u001b[39mappend(image_as_array)\n\u001b[1;32m---> 13\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43marray\u001b[49m\u001b[43m(\u001b[49m\u001b[43mX\u001b[49m\u001b[43m)\u001b[49m\n",
+      "\u001b[1;31mValueError\u001b[0m: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (1071,) + inhomogeneous part."
+     ]
+    }
+   ],
+   "source": [
+    "import cv2\n",
+    "\n",
+    "def extract_SIFT(datas):\n",
+    "    # Creating X array with all HOG information of images\n",
+    "    X = []\n",
+    "    sift = cv2.SIFT_create()\n",
+    "\n",
+    "    for name, data in datas.items():\n",
+    "        for row in data[\"labels\"].values():\n",
+    "            gray_image = cv2.cvtColor(data[\"img\"], cv2.COLOR_RGB2GRAY)\n",
+    "            keypoints, descriptors = sift.detectAndCompute(gray_image, None)\n",
+    "            if descriptors is not None:\n",
+    "                X.append(descriptors.flatten())\n",
+    "\n",
+    "    return np.array(X)\n",
+    "\n",
+    "\n",
+    "# Update training dataset\n",
+    "X_train_LBP = extract_SIFT(datas=datas_train)\n",
+    "\n",
+    "# Update validation dataset\n",
+    "X_val_LBP = extract_SIFT(datas=datas_val)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "svm_model = svm.SVC(kernel='linear') \n",
+    "svm_model.fit(X_train_LBP, Y_train)\n",
+    "y_LBP = svm_model.predict(X_val_LBP)\n",
+    "\n",
+    "print(f\"Taux d'erreur : {np.mean(y_LBP != Y_val)}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/machine_learning/learn.ipynb b/machine_learning/learn.ipynb
deleted file mode 100644
index 18386e8cd90048a8b7bc3cdf9b5bc8d8a71aa9df..0000000000000000000000000000000000000000
--- a/machine_learning/learn.ipynb
+++ /dev/null
@@ -1,229 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Entrainement d'un modèle avec la méthode des SVM"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "#### Chargement des données d'entrainement"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import os\n",
-    "import cv2\n",
-    "import numpy as np\n",
-    "import random"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "\"\"\" \n",
-    "We will create a dict with all the usefull datas of the training dataset\n",
-    "datas = {\n",
-    "    \"XXXX\" (name of the file) : {\n",
-    "        \"img\" : ndarray of the image,\n",
-    "        \"labels\" (data of the labels): {\n",
-    "            \"X\" index of the label (0,1,...,n) : {\n",
-    "                \"name\" : name of the label,\n",
-    "                \"coord\" : coord of the label like xmin, ymin, xmax, ymax,\n",
-    "                \"img\" : crooped img of the label,\n",
-    "            }\n",
-    "        }\n",
-    "    }\n",
-    "}\n",
-    "\n",
-    "\"\"\"\n",
-    "\n",
-    "def generate_empty_bbox(image_width, image_height):\n",
-    "    # Thanks to the stats, we know that size of bbox will be (127, 145) -> Average size of labels \n",
-    "    # Génération de coordonnées aléatoires pour le coin supérieur gauche de la boundebox\n",
-    "    x_min = random.randint(0, image_width - 127)\n",
-    "    y_min = random.randint(0, image_height - 145)\n",
-    "    \n",
-    "    # Calcul des coordonnées du coin inférieur droit de la boundebox\n",
-    "    x_max = x_min + 127\n",
-    "    y_max = y_min + 145\n",
-    "    \n",
-    "    return (x_min, y_min, x_max, y_max)\n",
-    "\n",
-    "def load_data(image_dir, label_dir):\n",
-    "    datas = {}\n",
-    "\n",
-    "    for image_file in os.listdir(image_dir):\n",
-    "        # Computing name and files paths\n",
-    "        image_path = image_dir + '/' + image_file\n",
-    "        name = image_file.split('.')[0]\n",
-    "        label_path = label_dir + '/' + name + '.csv'\n",
-    "        \n",
-    "        # Import image as array\n",
-    "        image = cv2.imread(image_path)\n",
-    "\n",
-    "        # Import labels as array \n",
-    "        with open(label_path, 'r') as file:\n",
-    "            rows = file.readlines()\n",
-    "\n",
-    "            label_data = {}\n",
-    "            if rows == ['\\n']:  # Create a random empty label to balance model\n",
-    "                # Create random coords for empty label\n",
-    "                xmin, ymin, xmax, ymax = generate_empty_bbox(image.shape[1], image.shape[0])\n",
-    "    \n",
-    "                # Get the cropped image (as array) of the label\n",
-    "                cropped_image = image[ymin:ymax, xmin:xmax]\n",
-    "               \n",
-    "                label_data[0] = {\n",
-    "                        \"name\":\"empty\",\n",
-    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
-    "                        \"img\":cropped_image\n",
-    "                    }\n",
-    "            else:\n",
-    "                for i, row in enumerate(rows):  # One image can contain several labels\n",
-    "                    row = row.strip().split(\",\")\n",
-    "\n",
-    "                    # Compute coords of the label\n",
-    "                    xmin, ymin, xmax, ymax = map(int, row[0:4])\n",
-    "\n",
-    "                    # Get the label name\n",
-    "                    class_name = row[4]\n",
-    "\n",
-    "                    # Get the cropped image (as array) of the label\n",
-    "                    cropped_image = image[ymin:ymax, xmin:xmax]\n",
-    "                    \n",
-    "                    # Adding to the json\n",
-    "                    label_data[i] = {\n",
-    "                        \"name\":class_name,\n",
-    "                        \"coord\": (xmin, ymin, xmax, ymax),\n",
-    "                        \"img\":cropped_image\n",
-    "                    }\n",
-    "\n",
-    "        datas[name] = {\n",
-    "             \"img\" : image,\n",
-    "             \"labels\" : label_data,\n",
-    "        }\n",
-    "       \n",
-    "    return datas"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Creating the dict of the datas \n",
-    "\n",
-    "datas = load_data(\"../data/train/images\", \"../data/train/labels\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def extract_features(img):\n",
-    "    # Convertion to gray level\n",
-    "    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
-    "\n",
-    "    # Color Hist\n",
-    "    hist_color = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n",
-    "    hist_color = cv2.normalize(hist_color, hist_color).flatten()\n",
-    "    \n",
-    "    # Gradient Hist\n",
-    "    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)\n",
-    "    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)\n",
-    "    grad_mag = np.sqrt(sobelx**2 + sobely**2)\n",
-    "    hist_gradient = cv2.calcHist([grad_mag.astype(np.uint8)], [0], None, [16], [0, 256])\n",
-    "    hist_gradient = cv2.normalize(hist_gradient, hist_gradient).flatten()\n",
-    "    \n",
-    "    return np.concatenate((hist_color, hist_gradient))\n",
-    "\n",
-    "\n",
-    "# Dict to convert str class name to int\n",
-    "name_to_int = {\n",
-    "    \"danger\": 0,\n",
-    "    \"interdiction\": 1,\n",
-    "    \"obligation\": 2,\n",
-    "    \"stop\": 3,\n",
-    "    \"ceder\": 4,\n",
-    "    \"frouge\": 5,\n",
-    "    \"forange\": 6,\n",
-    "    \"fvert\": 7,\n",
-    "    \"ff\": 8,\n",
-    "    \"empty\": 9\n",
-    "}\n",
-    "\n",
-    "\n",
-    "# Creating arrays with all labels datas & classes\n",
-    "X_train = []\n",
-    "Y_train = []\n",
-    "\n",
-    "for name, data in datas.items():\n",
-    "    for row in data[\"labels\"].values():\n",
-    "        X_train.append(extract_features(row[\"img\"]))\n",
-    "        Y_train.append(name_to_int[row[\"name\"]])\n",
-    "\n",
-    "X_train = np.array(X_train)\n",
-    "Y_train = np.array(Y_train)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[1 1 0 ... 1 5 7]\n"
-     ]
-    }
-   ],
-   "source": [
-    "from sklearn import svm\n",
-    "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n",
-    "\n",
-    "svm_model = svm.SVC(kernel='linear')  # Choix du noyau linéaire\n",
-    "svm_model.fit(X_train, Y_train)\n",
-    "\n",
-    "print(svm_model)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "venv",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.10.11"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}