classical computer vision classification
(Kaggle notebook, Python 3.10, CPU; cells are separated with `# %%` markers)
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"none","dataSources":[{"sourceId":7137490,"sourceType":"datasetVersion","datasetId":4118830},{"sourceId":7240300,"sourceType":"datasetVersion","datasetId":4193445},{"sourceId":7266137,"sourceType":"datasetVersion","datasetId":4211644},{"sourceId":7266742,"sourceType":"datasetVersion","datasetId":4212086},{"sourceId":7269015,"sourceType":"datasetVersion","datasetId":4213706}],"dockerImageVersionId":30615,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"","metadata":{},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"from pathlib import Path\nPath('/kaggle/working/train_destination').mkdir(parents=True, exist_ok=True)\nPath('/kaggle/working/validate_destination').mkdir(parents=True, exist_ok=True)","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"#os.remove('Trained Models/basic_CNN_Model.h5')\n#os.remove('Trained Models/basic_CNN_Model_ExtraTrain.h5')","metadata":{"trusted":true},"execution_count":null,"outputs":[]},{"cell_type":"code","source":"import cv2\nimport os\nimport shutil\n\ndef apply_bilateral_filter(image):\n return cv2.bilateralFilter(image, d=9, sigmaColor=75, sigmaSpace=75)\n\n# Specify the path to your dataset\ndataset_dir = '/kaggle/input/dataest/Product Classification'\n\n# Specify the destination folders for train and validate sets\ntrain_destination = '/kaggle/working/train_destination'\nvalidate_destination = '/kaggle/working/validate_destination'\n\n# Create the destination folders if they don't exist\nos.makedirs(train_destination, exist_ok=True)\nos.makedirs(validate_destination, exist_ok=True)\n\n# Loop through the numbered folders (1 to 20)\nfor product_folder in range(1, 21):\n # Path to the current product's train folder\n train_folder_path = os.path.join(dataset_dir, str(product_folder), 'Train')\n\n # Path to the current product's validate folder\n validate_folder_path = os.path.join(dataset_dir, str(product_folder), 'Validation')\n\n # Check if the 
# %%
# Bag of visual words: extract SIFT descriptors from the filtered training
# images, cluster them into k visual words with KMeans, then describe every
# image as a histogram of visual-word occurrences.
import cv2
import os
import numpy as np
from sklearn.cluster import KMeans

def apply_bilateral_filter(image):
    return cv2.bilateralFilter(image, d=9, sigmaColor=75, sigmaSpace=75)

def extract_sift_features(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT_create()
    keypoints, descriptors = sift.detectAndCompute(gray, None)
    return descriptors

def build_codebook(features, k=100):
    kmeans = KMeans(n_clusters=k)
    kmeans.fit(features)
    return kmeans

def image_to_hist(image, kmeans):
    sift = cv2.SIFT_create()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    keypoints, descriptors = sift.detectAndCompute(gray, None)

    if descriptors is not None and descriptors.shape[0] > 0:
        # Assign each descriptor to a visual word and count the occurrences
        labels = kmeans.predict(descriptors)
        return np.bincount(labels, minlength=len(kmeans.cluster_centers_))
    else:
        # Return an empty histogram if no descriptors were found
        return np.zeros(len(kmeans.cluster_centers_))

dataset_dir = '/kaggle/input/dataest/Product Classification'
train_destination = '/kaggle/working/train_destination'
validate_destination = '/kaggle/working/validate_destination'
os.makedirs(train_destination, exist_ok=True)
os.makedirs(validate_destination, exist_ok=True)

# Number of clusters (visual words) for the codebook
k = 100

# Collect SIFT features from all training images
all_train_features = []
for product_folder in range(1, 21):
    train_folder_path = os.path.join(dataset_dir, str(product_folder), 'Train')
    if os.path.exists(train_folder_path):
        for image_file in os.listdir(train_folder_path):
            image = cv2.imread(os.path.join(train_folder_path, image_file))
            if image is None:
                continue
            filtered_image = apply_bilateral_filter(image)
            features = extract_sift_features(filtered_image)
            if features is not None:  # images with no keypoints return None
                all_train_features.extend(features)

# Build the codebook using KMeans
kmeans = build_codebook(np.array(all_train_features), k)

# Convert every train/validation image to a visual-word histogram and save it.
# Each histogram gets a unique file name (product folder + image name), so one
# .npy file is written per image.
for subset, destination in [('Train', train_destination), ('Validation', validate_destination)]:
    for product_folder in range(1, 21):
        folder_path = os.path.join(dataset_dir, str(product_folder), subset)
        if not os.path.exists(folder_path):
            continue
        for image_file in os.listdir(folder_path):
            image = cv2.imread(os.path.join(folder_path, image_file))
            if image is None:
                continue
            filtered_image = apply_bilateral_filter(image)
            hist = image_to_hist(filtered_image, kmeans)
            stem = os.path.splitext(image_file)[0]
            hist_path = os.path.join(destination, f'{product_folder}_{stem}_hist.npy')
            np.save(hist_path, hist)
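# %%
# A toy illustration (synthetic descriptors, not part of the original pipeline)
# of what image_to_hist computes: each descriptor is assigned to its nearest
# cluster centre ("visual word"), and the histogram counts how often each word
# occurs in one image.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
toy_descriptors = rng.random((500, 128)).astype(np.float32)  # stand-in for SIFT output
toy_kmeans = KMeans(n_clusters=10, n_init=10).fit(toy_descriptors)

one_image = rng.random((40, 128)).astype(np.float32)  # 40 descriptors from "one image"
words = toy_kmeans.predict(one_image)
hist = np.bincount(words, minlength=10)
print(hist, hist.sum())  # 40 descriptors distributed over 10 visual words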
# %%
# Train a linear SVM on the saved visual-word histograms. Each image is one
# sample with k = 100 features (its histogram), labelled by its product folder.
import glob
import os
import numpy as np
from sklearn import svm
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

train_destination = '/kaggle/working/train_destination'

# Load the per-image histograms and their labels for the training set
train_data = []
train_labels = []
for product_folder in range(1, 21):
    for hist_path in glob.glob(os.path.join(train_destination, f'{product_folder}_*_hist.npy')):
        train_data.append(np.load(hist_path))
        train_labels.append(product_folder)

train_data = np.stack(train_data)      # shape: (n_images, k)
train_labels = np.array(train_labels)  # shape: (n_images,)

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
    train_data, train_labels, test_size=0.2, random_state=42)

# Use StandardScaler to normalize the features (fit on the training split only)
scaler = StandardScaler()
X_train_normalized = scaler.fit_transform(X_train)
X_test_normalized = scaler.transform(X_test)

print("Shapes after normalization:")
print("X_train_normalized shape:", X_train_normalized.shape)
print("y_train shape:", y_train.shape)
print("X_test_normalized shape:", X_test_normalized.shape)
print("y_test shape:", y_test.shape)

# Train the SVM classifier
clf = svm.SVC(kernel='linear')
clf.fit(X_train_normalized, y_train)

# Predict on the normalized test set and report accuracy
y_pred = clf.predict(X_test_normalized)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy * 100:.2f}%")

# Some predictions and actual labels for debugging
print("Predictions:", y_pred)
print("Actual Labels:", y_test)
print("Unique train labels:", np.unique(y_train))
print("Unique test labels:", np.unique(y_test))
print("Unique predicted labels:", np.unique(y_pred))
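# %%
# A minimal sketch (synthetic data, not from the original notebook): chaining
# the scaler and the SVM in a Pipeline re-fits the scaler inside every
# cross-validation fold, which avoids leaking test statistics into scaling.
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
X_toy = rng.poisson(5.0, size=(200, 100)).astype(float)  # 200 fake BoVW histograms, k = 100 bins
y_toy = rng.integers(1, 21, size=200)                    # fake labels 1..20

pipe = make_pipeline(StandardScaler(), SVC(kernel='linear'))
scores = cross_val_score(pipe, X_toy, y_toy, cv=5)
print("CV accuracy: %.3f +/- %.3f" % (scores.mean(), scores.std()))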
# %%
# Dataset utilities shared by the HOG and CNN pipelines: walk the dataset
# tree, read images in grayscale, resize them, and one-hot encode the labels.
import os
import cv2
import numpy as np
from random import shuffle

dataset_dir = '/kaggle/input/dataest/Product Classification'
data_path = dataset_dir

def get_dataset(data_path, model_type, image_size):
    train_data_path, test_data_path = get_images_paths(data_path)
    train_data = read_images(train_data_path, model_type, image_size)
    test_data = read_images(test_data_path, model_type, image_size)
    shuffle(train_data)
    return train_data, test_data

def get_images_paths(data_path):
    train_images_path = []
    test_images_path = []
    for class_folder in os.listdir(data_path):
        for sub_folder in os.listdir(data_path + '/' + class_folder):
            for img in os.listdir(data_path + '/' + class_folder + '/' + sub_folder):
                image_path = os.path.join(data_path + '/' + class_folder + '/' + sub_folder, img)
                if image_path.endswith(".csv"):
                    continue
                if sub_folder == 'Train':
                    train_images_path.append(image_path)
                else:
                    test_images_path.append(image_path)
    return train_images_path, test_images_path

def read_images(images_paths, model_type, image_size):
    images = []
    for i in images_paths:
        image = cv2.imread(i, 0)  # read as grayscale
        image = resize_image(image, image_size, model_type)
        image_label = create_label(i)
        images.append([np.array(image), image_label])
    return images

def resize_image(image, image_size, model_type):
    if model_type == 'HOG':
        # cv2.resize takes (width, height): HOG windows are twice as tall as wide
        return cv2.resize(image, (image_size, 2 * image_size))
    else:
        return cv2.resize(image, (image_size, image_size))

def create_label(image_path):
    # The class folder is the third-from-last path component:
    # <dataset>/<class>/<Train|Validation>/<image>
    # (indexing from the end keeps this working for datasets at any depth)
    image_label = image_path.split('/')[-3]
    label_encoded = np.zeros((20, 1))
    for i in range(20):
        if image_label == str(i + 1):
            label_encoded[i] = 1
    return label_encoded
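# %%
# Equivalent one-hot encoding in one line (an aside, assuming the class folders
# are named '1'..'20' as create_label above expects):
import numpy as np

def one_hot_from_folder(folder_name, num_classes=20):
    # folder '1' -> index 0, ..., folder '20' -> index 19
    return np.eye(num_classes)[int(folder_name) - 1].reshape(num_classes, 1)

assert one_hot_from_folder('3')[2, 0] == 1.0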
# %%
# CNN pipeline: a small Keras CNN trained on 128x128 grayscale images.
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
import os
import keras
import cv2

class CNN:

    image_Classes = [str(i) for i in range(1, 21)]

    @staticmethod
    def generate_CNN_model(train_data):
        X_train, Y_train = CNN.reformat_dataset(train_data, image_size=128)
        model = CNN.buildSequentialModel()

        if os.path.exists('Trained Models/basic_CNN_Model.h5'):
            model = keras.models.load_model('Trained Models/basic_CNN_Model.h5')
        else:
            model.fit(X_train, Y_train, validation_split=0.2, epochs=20)
            prediction = model.predict(X_train)
            Y_true, prediction = CNN.reformat_labels(Y_train, prediction)
            print("Train Accuracy: " + str(accuracy_score(Y_true, prediction)))
            os.makedirs('Trained Models', exist_ok=True)
            model.save('Trained Models/basic_CNN_Model.h5')

        return model

    @staticmethod
    def reformat_dataset(data, image_size):
        # Stack the images into (n, H, W, 1) and the one-hot labels into (n, 20)
        X = np.array([i[0] for i in data], dtype=np.float32).reshape(-1, image_size, image_size, 1)
        Y = np.array([i[1] for i in data]).reshape(len(data), 20)
        return X, Y

    @staticmethod
    def buildSequentialModel():
        model = tf.keras.Sequential([
            layers.Input(shape=(128, 128, 1)),
            layers.Conv2D(32, 5, strides=2, activation='relu'),
            layers.Conv2D(32, 5, activation='relu'),
            layers.MaxPool2D((3, 3)),
            layers.Conv2D(32, 5, strides=2, activation='relu'),
            layers.Conv2D(32, 5, activation='relu'),
            layers.Flatten(),
            layers.Dense(128, activation='relu'),
            layers.Dense(20, activation='softmax')
        ])

        model.summary()
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    @staticmethod
    def test_model(test_data, model, visualise=False):
        X_test, Y_test = CNN.reformat_dataset(test_data, image_size=128)
        predictions = model.predict(X_test)
        print("Shape of predictions:", predictions.shape)

        y_true, predictions = CNN.reformat_labels(Y_test, predictions)
        print("Testing Accuracy: " + str(accuracy_score(y_true, predictions)))

        matrix = confusion_matrix(y_true, predictions)
        disp = ConfusionMatrixDisplay(confusion_matrix=matrix)
        disp.plot()
        plt.show()

        if visualise:
            CNN.Visualise_data(test_data, predictions)

    @staticmethod
    def Visualise_data(data, predictions):
        for i in range(len(data)):
            plt.imshow(data[i][0], cmap='gray')  # images were read as grayscale
            plt.title("Prediction is " + str(CNN.image_Classes[predictions[i]]))
            plt.show()

    @staticmethod
    def reformat_labels(y_true, y_pred):
        # Collapse one-hot / softmax vectors to class indices
        true = [np.argmax(y) for y in y_true]
        prediction = [np.argmax(y) for y in y_pred]
        return true, prediction

# %%
# Train the CNN and report train and test (validation) accuracy
train_data, test_data = get_dataset(data_path, model_type='CNN', image_size=128)
model = CNN.generate_CNN_model(train_data)
CNN.test_model(test_data, model, visualise=False)

# %%
# Accuracy of the saved model on the held-out test set
test = '/kaggle/input/product-test'
train_data, test_data = get_dataset(test, model_type='CNN', image_size=128)
model = keras.models.load_model('Trained Models/basic_CNN_Model.h5')
CNN.test_model(test_data, model, visualise=False)
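# %%
# A small follow-up sketch (demo labels are hypothetical): with 20 classes,
# per-class recall from the confusion matrix is often more informative than
# overall accuracy. Rows of the matrix are true classes, columns predictions,
# so row-normalising the diagonal gives recall per class.
import numpy as np
from sklearn.metrics import confusion_matrix

y_true_demo = [0, 0, 1, 1, 2, 2]
y_pred_demo = [0, 1, 1, 1, 2, 0]
m = confusion_matrix(y_true_demo, y_pred_demo)
per_class_recall = np.diag(m) / m.sum(axis=1)
print(per_class_recall)  # [0.5, 1.0, 0.5]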
# %%
# HOG pipeline: HOG descriptors of 64x128 windows fed to logistic regression.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import numpy as np
from skimage.feature import hog
from joblib import dump, load
import os
import cv2

class HOG:

    image_Classes = [str(i) for i in range(1, 21)]

    # Directory where the trained model is stored; created if it doesn't exist
    directory_path = '/Trained Models/'
    os.makedirs(directory_path, exist_ok=True)

    @staticmethod
    def generate_HOG_model(train_data):
        X_train, Y_train = HOG.generate_hog_features(train_data)

        if os.path.exists('/Trained Models/HOG_model.joblib'):
            clf = load('/Trained Models/HOG_model.joblib')
        else:
            clf = LogisticRegression().fit(X_train, Y_train)
            dump(clf, os.path.join(HOG.directory_path, 'HOG_model.joblib'))

        return clf

    @staticmethod
    def generate_hog_features(data):
        hog_features = []
        images_labels = []
        for i in data:
            fd = hog(i[0], orientations=9, pixels_per_cell=(8, 8),
                     cells_per_block=(2, 2), visualize=False)
            hog_features.append(fd)
            images_labels.append(HOG.reformat_label(i[1]))
        return np.array(hog_features), np.array(images_labels)

    @staticmethod
    def reformat_label(image_label):
        # One-hot vector -> class index
        return np.argmax(image_label)

    @staticmethod
    def testModel(test_data, clf, visualise=False):
        X_test, Y_test = HOG.generate_hog_features(test_data)
        predictions = clf.predict(X_test)

        print("Testing Accuracy: " + str(accuracy_score(Y_test, predictions)))

        matrix = confusion_matrix(Y_test, predictions)
        disp = ConfusionMatrixDisplay(confusion_matrix=matrix)
        disp.plot()
        plt.show()

        if visualise:
            HOG.Visualise_data(test_data, predictions)

    @staticmethod
    def Visualise_data(data, predictions):
        for i in range(len(data)):
            plt.imshow(data[i][0], cmap='gray')  # images were read as grayscale
            plt.title("Prediction is " + str(HOG.image_Classes[predictions[i]]))
            plt.show()

# %%
# Accuracy on the validation set for HOG, with timing
import time

train_data, test_data = get_dataset(data_path, model_type='HOG', image_size=64)

training_start_time = time.time()
clf = HOG.generate_HOG_model(train_data)
training_stop_time = time.time()

testing_start_time = time.time()
HOG.testModel(test_data, clf, visualise=False)
testing_stop_time = time.time()

# print(f"Training time: {training_stop_time - training_start_time}s")
# print(f"Testing time: {testing_stop_time - testing_start_time}s")

# %%
# Test script for HOG on the held-out test set
test = '/kaggle/input/product-test'
train_data, test_data = get_dataset(test, model_type='HOG', image_size=64)
clf = HOG.generate_HOG_model(train_data)
HOG.testModel(test_data, clf, visualise=False)
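# %%
# A dimensionality check (illustrative, using a blank image): with
# image_size=64 the HOG images are resized to 64x128, and 9 orientations,
# 8x8-pixel cells, and 2x2-cell blocks yield 16x8 cells -> 15x7 blocks
# -> 15 * 7 * 2 * 2 * 9 = 3780 features per image.
import numpy as np
from skimage.feature import hog

dummy = np.zeros((128, 64), dtype=np.uint8)  # (rows, cols) = (2*image_size, image_size)
fd = hog(dummy, orientations=9, pixels_per_cell=(8, 8),
         cells_per_block=(2, 2), visualize=False)
print(fd.shape)  # (3780,)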
# %%
# Continue training the CNN on the Validation images (extra training pass)
train_images_path2 = []
for class_folder in os.listdir(data_path):
    for sub_folder in os.listdir(data_path + '/' + class_folder):
        for img in os.listdir(data_path + '/' + class_folder + '/' + sub_folder):
            image_path = os.path.join(data_path + '/' + class_folder + '/' + sub_folder, img)
            if image_path.endswith(".csv"):
                continue
            if sub_folder == 'Validation':
                train_images_path2.append(image_path)

train_data2 = read_images(train_images_path2, 'CNN', 128)
shuffle(train_data2)
model = keras.models.load_model('Trained Models/basic_CNN_Model.h5')
X_train, Y_train = CNN.reformat_dataset(train_data2, image_size=128)
model.fit(X_train, Y_train, validation_split=0.2, epochs=15)
model.save('Trained Models/basic_CNN_Model_ExtraTrain.h5')

# %%
# Evaluate the extra-trained CNN (loaded from a saved dataset) on the test set
train_data, test_data = get_dataset(test, model_type='CNN', image_size=128)

training_start_time = time.time()
model = keras.models.load_model('/kaggle/input/saved-model-for-cnn/basic_CNN_Model_ExtraTrain.h5')
training_stop_time = time.time()

testing_start_time = time.time()
CNN.test_model(test_data, model, visualise=False)

# %%
# Retrain the HOG classifier on the Validation images as well
train_data2_hog = read_images(train_images_path2, 'HOG', 64)
shuffle(train_data2_hog)
X_train, Y_train = HOG.generate_hog_features(train_data2_hog)
clf = LogisticRegression().fit(X_train, Y_train)
dump(clf, os.path.join('/Trained Models/', 'HOG_model2.joblib'))

# %%
'''
clf = load('/Trained Models/HOG_model2.joblib')
test = '/kaggle/input/product-test'
train_data, test_data = get_dataset(test, model_type='HOG', image_size=64)
HOG.testModel(test_data, clf, visualise=False)
'''

# %%
# Test script for HOG on the held-out test set (uses the cached model)
test = '/kaggle/input/product-test'
train_data, test_data = get_dataset(test, model_type='HOG', image_size=64)
clf = HOG.generate_HOG_model(train_data)
HOG.testModel(test_data, clf, visualise=False)

# %%
# os.remove('Trained Models/basic_CNN_Model.h5')