From 7c7d2d4c27fa87b65ff602051a90f43e25bd10b6 Mon Sep 17 00:00:00 2001
From: ArtyZiff35 <42790063+ArtyZiff35@users.noreply.github.com>
Date: Sat, 16 Feb 2019 19:56:38 -0600
Subject: [PATCH] Updated CNN Model

---
 imageElaboration.py | 35 +++++++++++++++++++++++------------
 main.py             | 38 ++++++++++++++++++++++----------------
 plotter.py          | 26 ++++++++++++++++++++++++++
 testing.py          |  6 +++---
 4 files changed, 74 insertions(+), 31 deletions(-)
 create mode 100644 plotter.py

diff --git a/imageElaboration.py b/imageElaboration.py
index 3d4a5b7..7d3d946 100644
--- a/imageElaboration.py
+++ b/imageElaboration.py
@@ -2,11 +2,10 @@
 import cv2
 from utilities import *
 
-
-# This function applies all elaboration steps to the image
-def elaborateImage(newFrame):
+# This function is able to highlight the contours of road lane markings using color thresholding and canny edge
+def highlightRoadLaneMarkings(newFrame):
     # Adjusting brightness and contrast
-    newFrameAdjusted = apply_brightness_contrast(newFrame, 90, 90)
+    newFrameAdjusted = apply_brightness_contrast(newFrame, 100, 100)
 
     # Threshold so that only yellow and white are kept. Result is greyscale
     newFrameThreshold = thresholdWhiteAndYellow(newFrameAdjusted)
@@ -21,8 +20,8 @@ def elaborateImage(newFrame):
     height, width = newFrameEdges.shape
     # Creating white polygonal shape on black image
     bottomLeft = [0, height - 130]
-    topLeft = [10, height / 2 - 15]
-    topRight = [width -30, height / 2 - 15]
+    topLeft = [width / 3 + 40, height / 2]
+    topRight = [width / 3 * 2 - 40, height / 2]
     bottomRight = [width, height - 130]
     pts = np.array([bottomLeft, topLeft, topRight, bottomRight], np.int32)
     pts = pts.reshape((-1, 1, 2))
@@ -31,17 +30,29 @@ def elaborateImage(newFrame):
     # Doing AND operation with newFrameEdges
     newFrameROI = cv2.bitwise_and(newFrameEdges, newFrameEdges, mask=polygonalShape)
 
-    # Hough transform to detect straight lines. Returns an array of r and theta values
-    lines = cv2.HoughLinesP(newFrameROI, 1, np.pi / 180, 15)
-    blackImage = np.zeros((height, width, 1), np.uint8)
-    newFrameHough = drawHoughTransformLines(blackImage, lines)
+    return newFrameROI
+
+# This function applies all elaboration steps to the image
+def elaborateImage(newFrame):
+
 
     # Drawing road from original frame
-    newFrameGrey = cv2.cvtColor(newFrame, cv2.COLOR_BGR2GRAY)
+    newFrameAdjusted = apply_brightness_contrast(newFrame, 30, 15)
+    newFrameGrey = cv2.cvtColor(newFrameAdjusted, cv2.COLOR_BGR2GRAY)
+    height, width = newFrameGrey.shape
+    bottomLeft = [0, height - 130]
+    topLeft = [0, height / 2 + 10]
+    topCenter = [width/2, height / 2 - 15]
+    topRight = [width, height / 2 + 10]
+    bottomRight = [width, height - 130]
+    pts = np.array([bottomLeft, topLeft, topCenter, topRight, bottomRight], np.int32)
+    pts = pts.reshape((-1, 1, 2))
+    blackImage = np.zeros((height, width, 1), np.uint8)
+    polygonalShape = cv2.fillPoly(blackImage, [pts], (255, 255, 255))
     coloredMaskedRoad = cv2.bitwise_and(newFrameGrey, newFrameGrey, mask=polygonalShape)
     #coloredMaskedRoad = cv2.equalizeHist(coloredMaskedRoad)
+    newFrameROI = highlightRoadLaneMarkings(newFrame)
     newFrameMaskAndRoad = cv2.add(coloredMaskedRoad, newFrameROI)    # Adding canny edge overlay to highlight the lane markers
-    newFrameMaskAndRoadBlurred = cv2.GaussianBlur(newFrameMaskAndRoad, (5, 5), 0)
 
     # Cutting image basing on mask size
     result = cutTopAndBottom(coloredMaskedRoad, int(height / 2 - 15), int(height - 130))
diff --git a/main.py b/main.py
index 4ee2e2b..467cab2 100644
--- a/main.py
+++ b/main.py
@@ -12,10 +12,6 @@
 from imageElaboration import *
 
 
-# TODO ignore lateral movements
-# TODO do better evaluation
-# TODO try other image preprocessing
-
 
 # Setting up a Keras model of: 4 Conv and Pool + Flat + 5 Dense
 def setupNvidiaModel(inputShape):
@@ -164,6 +160,10 @@ def setupTestModel(inputShape):
 videoFeed = cv2.VideoCapture('./sourceData/train.mp4')
 videoLengthInFrames = int(videoFeed.get(cv2.CAP_PROP_FRAME_COUNT))
 
+# Preparing for validation data retrieval
+validationSize = int(videoLengthInFrames * 0.15)
+validationGap = int(videoLengthInFrames/validationSize)
+
 # Iterating through all couples of frames of the video
 coupleCounter = 0
 frameCoupleArray = []
@@ -179,7 +179,7 @@ def setupTestModel(inputShape):
 nb_iterations = 2
 deg_expansion = 5
 STD = 1.3
-while(coupleCounter < videoLengthInFrames-20):
+while(coupleCounter < videoLengthInFrames-1):
 
     # Read a couple of new frames from the video feed
     ret2, newFrame = videoFeed.read()
@@ -216,16 +216,22 @@ def setupTestModel(inputShape):
                                         STD, 0)
     #flow = opticalFlowDense(oldFrame, newFrame)
 
-    # Saving the couple of data and label
-    batchFrames.append(flow)
-    batchSpeeds.append(speedTruthArray[coupleCounter])
+
+    # Check if this frame is for training or validation
+    if frameCounter == validationGap:
+        frameCounter = 0
+        evalFrames.append(flow)
+        evalSpeeds.append(speedTruthArray[coupleCounter])
+    else:
+        # Saving the couple of data and label for training
+        batchFrames.append(flow)
+        batchSpeeds.append(speedTruthArray[coupleCounter])
 
     # Incrementing couples counter and swapping frames
     oldFrameROI = newFrameROI
     oldFrame = newFrame
     coupleCounter = coupleCounter + 1
-    #cv2.imshow('frame', draw_flow(cv2.cvtColor(newFrame, cv2.COLOR_BGR2GRAY), flow))
-    #cv2.imshow('frame', newFrameROI)
+    frameCounter = frameCounter + 1
 
     cv2.imshow('frame',draw_flow(newFrameROI,flow))
     cv2.waitKey(1)
@@ -233,10 +239,15 @@ def setupTestModel(inputShape):
 # Shuffling data before training
+# For training
 print("\n\n\n###############################\nSHUFFLING MODEL\n")
 unified = list(zip(batchFrames, batchSpeeds))
 np.random.shuffle(unified)
 batchFrames, batchSpeeds = zip(*unified)
+# For validation
+unified = list(zip(evalFrames, evalSpeeds))
+np.random.shuffle(unified)
+evalFrames, evalSpeeds = zip(*unified)
 
 
 # Training model
@@ -252,13 +263,8 @@ def setupTestModel(inputShape):
         # Training batch
         index = index + 1
         frameCounter = frameCounter + 1
-        if frameCounter == batchSize or (coupleCounter+1) == videoLengthInFrames:
+        if frameCounter == batchSize or index==(len(batchSpeeds)-1) :
             print("\nWe are at " + str(index) + "\n")
-            # Choosing some frames to use for evaluation
-            evalFrames.append(trainBatchFrame.pop(int(batchSize/2)))
-            evalSpeeds.append(trainBatchSpeed.pop(int(batchSize/2)))
-            evalFrames.append(trainBatchFrame.pop(int(batchSize/4)))
-            evalSpeeds.append(trainBatchSpeed.pop(int(batchSize/4)))
             # Preparing data
             X = np.array(trainBatchFrame)
             Y = np.array(trainBatchSpeed)
diff --git a/plotter.py b/plotter.py
new file mode 100644
index 0000000..2d858cb
--- /dev/null
+++ b/plotter.py
@@ -0,0 +1,26 @@
+import matplotlib.pyplot as plt
+
+
+
+file = open("./sourceData/train.txt")
+speedTruthArrayString = file.readlines()
+speedTruthArray = []
+for numeric_string in speedTruthArrayString:
+    numeric_string = numeric_string.strip('\n')
+    speedTruthArray.append(float(numeric_string))
+file.close()
+
+
+plt.plot(speedTruthArray, label = "Ground truth speeds")
+# naming the x axis
+plt.xlabel('Frame')
+# naming the y axis
+plt.ylabel('Speed')
+# giving a title to my graph
+plt.title('Speed per frame chart')
+
+# show a legend on the plot
+plt.legend()
+
+# function to show the plot
+plt.show()
\ No newline at end of file
diff --git a/testing.py b/testing.py
index 58aac85..5cd4731 100644
--- a/testing.py
+++ b/testing.py
@@ -8,7 +8,7 @@
 
 # Reading all the speed ground truths
 print("Reading speed ground truths")
-file = open("./sourceData/train.txt")
+file = open("./sourceData/test.txt")
 speedTruthArrayString = file.readlines()
 speedTruthArray = []
 for numeric_string in speedTruthArrayString:
@@ -18,13 +18,13 @@
 print("Read " + str(len(speedTruthArray)) + " values")
 
 # Loading the Keras trained model
-model = load_model('./savedModels/greyMaskSimple_15epochs_32batch_500section.h5')
+model = load_model('./savedModels/greyMask.h5')
 model.compile(Adam(lr=0.001), loss="mse", metrics=["mse"])
 
 # Opening testing video
-videoFeed = cv2.VideoCapture('./sourceData/train.mp4')
+videoFeed = cv2.VideoCapture('./sourceData/test.mp4')
 videoLengthInFrames = int(videoFeed.get(cv2.CAP_PROP_FRAME_COUNT))
 print(videoLengthInFrames)
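
Note on the new validation split (illustrative sketch, not part of the patch):
main.py now holds out roughly 15% of frames by skimming one frame out of every
validationGap. frameCounter advances once per loop pass; when it reaches
validationGap it is reset and that frame's flow and ground-truth speed go to
evalFrames/evalSpeeds instead of the training batch. A minimal replay of that
holdout under the same counters; splitByGap and holdoutFraction are
hypothetical names, and the max(1, ...) guard is an addition for tiny inputs:

    # Sketch only: replays the every-Nth-frame holdout introduced in main.py.
    def splitByGap(frames, speeds, holdoutFraction=0.15):
        validationSize = max(1, int(len(frames) * holdoutFraction))  # guard added; the patch uses int(...) directly
        validationGap = int(len(frames) / validationSize)
        batchFrames, batchSpeeds = [], []
        evalFrames, evalSpeeds = [], []
        frameCounter = 0
        for frame, speed in zip(frames, speeds):
            if frameCounter == validationGap:
                # Reset the counter and send this sample to the eval set
                frameCounter = 0
                evalFrames.append(frame)
                evalSpeeds.append(speed)
            else:
                # Everything else stays in the training set
                batchFrames.append(frame)
                batchSpeeds.append(speed)
            frameCounter += 1
        return batchFrames, batchSpeeds, evalFrames, evalSpeeds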
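
The reworked elaborateImage keeps only the road region by rasterizing a
five-point polygon into a black single-channel mask with cv2.fillPoly and
ANDing it against the grey frame. A self-contained sketch of that
fillPoly + bitwise_and pattern, using the same polygon corners as the patch
(maskRoadPolygon is a hypothetical name):

    import cv2
    import numpy as np

    def maskRoadPolygon(grey):
        # grey: single-channel image, e.g. from cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = grey.shape
        pts = np.array([[0, height - 130],               # bottomLeft
                        [0, height // 2 + 10],           # topLeft
                        [width // 2, height // 2 - 15],  # topCenter
                        [width, height // 2 + 10],       # topRight
                        [width, height - 130]],          # bottomRight
                       np.int32).reshape((-1, 1, 2))
        # White polygon drawn onto a black image becomes the ROI mask
        mask = np.zeros((height, width), np.uint8)
        cv2.fillPoly(mask, [pts], 255)
        # Keep only the pixels inside the polygon
        return cv2.bitwise_and(grey, grey, mask=mask)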
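
The flow fed to the batches comes from cv2.calcOpticalFlowFarneback; the hunk
shows only the tail of the call ("STD, 0)") plus nb_iterations = 2,
deg_expansion = 5 and STD = 1.3 defined just before the loop. A hedged
reconstruction of the full call, with the unshown pyr_scale, levels and
winsize arguments assumed (common choices), not taken from the patch:

    import cv2

    def denseFlow(oldGrey, newGrey):
        # OpenCV signature: calcOpticalFlowFarneback(prev, next, flow, pyr_scale,
        # levels, winsize, iterations, poly_n, poly_sigma, flags)
        return cv2.calcOpticalFlowFarneback(oldGrey, newGrey, None,
                                            0.5,  # pyr_scale (assumed)
                                            3,    # levels (assumed)
                                            15,   # winsize (assumed)
                                            2,    # nb_iterations, from main.py
                                            5,    # deg_expansion / poly_n, from main.py
                                            1.3,  # STD / poly_sigma, from main.py
                                            0)    # flags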