Updated CNN Model
ArtyZiff35 authored Feb 17, 2019
1 parent 32d829a commit 7c7d2d4
Showing 4 changed files with 74 additions and 31 deletions.
35 changes: 23 additions & 12 deletions imageElaboration.py
@@ -2,11 +2,10 @@
import cv2
from utilities import *


# This function applies all elaboration steps to the image
def elaborateImage(newFrame):
# This function is able to highlight the contours of road lane markings using color thresholding and canny edge
def highlightRoadLaneMarkings(newFrame):
# Adjusting brightness and contrast
newFrameAdjusted = apply_brightness_contrast(newFrame, 90, 90)
newFrameAdjusted = apply_brightness_contrast(newFrame, 100, 100)

# Threshold so that only yellow and white are kept. Result is greyscale
newFrameThreshold = thresholdWhiteAndYellow(newFrameAdjusted)
@@ -21,8 +20,8 @@ def elaborateImage(newFrame):
height, width = newFrameEdges.shape
# Creating white polygonal shape on black image
bottomLeft = [0, height - 130]
topLeft = [10, height / 2 - 15]
topRight = [width -30, height / 2 - 15]
topLeft = [width / 3 + 40, height / 2]
topRight = [width / 3 * 2 - 40, height / 2]
bottomRight = [width, height - 130]
pts = np.array([bottomLeft, topLeft, topRight, bottomRight], np.int32)
pts = pts.reshape((-1, 1, 2))
@@ -31,17 +30,29 @@ def elaborateImage(newFrame):
# Doing AND operation with newFrameEdges
newFrameROI = cv2.bitwise_and(newFrameEdges, newFrameEdges, mask=polygonalShape)

# Hough transform to detect straight lines. Returns an array of r and theta values
lines = cv2.HoughLinesP(newFrameROI, 1, np.pi / 180, 15)
blackImage = np.zeros((height, width, 1), np.uint8)
newFrameHough = drawHoughTransformLines(blackImage, lines)
return newFrameROI

# This function applies all elaboration steps to the image
def elaborateImage(newFrame):


# Drawing road from original frame
newFrameGrey = cv2.cvtColor(newFrame, cv2.COLOR_BGR2GRAY)
newFrameAdjusted = apply_brightness_contrast(newFrame, 30, 15)
newFrameGrey = cv2.cvtColor(newFrameAdjusted, cv2.COLOR_BGR2GRAY)
height, width = newFrameGrey.shape
bottomLeft = [0, height - 130]
topLeft = [0, height / 2 + 10]
topCenter = [width/2, height / 2 - 15]
topRight = [width, height / 2 + 10]
bottomRight = [width, height - 130]
pts = np.array([bottomLeft, topLeft, topCenter, topRight, bottomRight], np.int32)
pts = pts.reshape((-1, 1, 2))
blackImage = np.zeros((height, width, 1), np.uint8)
polygonalShape = cv2.fillPoly(blackImage, [pts], (255, 255, 255))
coloredMaskedRoad = cv2.bitwise_and(newFrameGrey, newFrameGrey, mask=polygonalShape)
#coloredMaskedRoad = cv2.equalizeHist(coloredMaskedRoad)
newFrameROI = highlightRoadLaneMarkings(newFrame)
newFrameMaskAndRoad = cv2.add(coloredMaskedRoad, newFrameROI) # Adding canny edge overlay to highlight the lane markers
newFrameMaskAndRoadBlurred = cv2.GaussianBlur(newFrameMaskAndRoad, (5, 5), 0)

# Cutting image basing on mask size
result = cutTopAndBottom(coloredMaskedRoad, int(height / 2 - 15), int(height - 130))
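For context, the new highlightRoadLaneMarkings pipeline reduces to colour thresholding, Canny edge detection, and a trapezoidal region-of-interest mask. A minimal self-contained sketch of that sequence follows; apply_brightness_contrast and thresholdWhiteAndYellow come from utilities.py, which is not part of this diff, so the HSV ranges and Canny thresholds below are assumptions rather than the repo's actual values.

import cv2
import numpy as np

def threshold_white_and_yellow(bgr):
    # Assumed stand-in for utilities.thresholdWhiteAndYellow: keep white
    # and yellow pixels (lane paint) and return a greyscale mask.
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    white = cv2.inRange(hsv, (0, 0, 200), (180, 40, 255))
    yellow = cv2.inRange(hsv, (15, 80, 120), (35, 255, 255))
    return cv2.bitwise_or(white, yellow)

def highlight_road_lane_markings(frame):
    # Threshold to lane colours, detect edges, then keep only the trapezoid
    # between the vertical midline and the hood line (height - 130), matching
    # the polygon built in this commit.
    mask = threshold_white_and_yellow(frame)
    edges = cv2.Canny(mask, 50, 150)  # thresholds assumed
    h, w = edges.shape
    pts = np.array([[0, h - 130],               # bottomLeft
                    [w // 3 + 40, h // 2],      # topLeft
                    [w * 2 // 3 - 40, h // 2],  # topRight
                    [w, h - 130]],              # bottomRight
                   np.int32).reshape((-1, 1, 2))
    roi = cv2.fillPoly(np.zeros((h, w), np.uint8), [pts], 255)
    return cv2.bitwise_and(edges, edges, mask=roi)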
38 changes: 22 additions & 16 deletions main.py
@@ -12,10 +12,6 @@
from imageElaboration import *


# TODO ignore lateral movements
# TODO do better evaluation
# TODO try other image preprocessing

# Setting up a Keras model of: 4 Conv and Pool + Flat + 5 Dense
def setupNvidiaModel(inputShape):

@@ -164,6 +160,10 @@ def setupTestModel(inputShape):
videoFeed = cv2.VideoCapture('./sourceData/train.mp4')
videoLengthInFrames = int(videoFeed.get(cv2.CAP_PROP_FRAME_COUNT))

# Preparing for validation data retrieval
validationSize = int(videoLengthInFrames * 0.15)
validationGap = int(videoLengthInFrames/validationSize)

# Iterating through all couples of frames of the video
coupleCounter = 0
frameCoupleArray = []
@@ -179,7 +179,7 @@ def setupTestModel(inputShape):
nb_iterations = 2
deg_expansion = 5
STD = 1.3
while(coupleCounter < videoLengthInFrames-20):
while(coupleCounter < videoLengthInFrames-1):

# Read a couple of new frames from the video feed
ret2, newFrame = videoFeed.read()
@@ -216,27 +216,38 @@ def setupTestModel(inputShape):
STD,
0)
#flow = opticalFlowDense(oldFrame, newFrame)
# Saving the couple of data and label
batchFrames.append(flow)
batchSpeeds.append(speedTruthArray[coupleCounter])

# Check if this frame is for training or validation
if frameCounter == validationGap:
frameCounter = 0
evalFrames.append(flow)
evalSpeeds.append(speedTruthArray[coupleCounter])
else:
# Saving the couple of data and label for training
batchFrames.append(flow)
batchSpeeds.append(speedTruthArray[coupleCounter])

# Incrementing couples counter and swapping frames
oldFrameROI = newFrameROI
oldFrame = newFrame
coupleCounter = coupleCounter + 1
#cv2.imshow('frame', draw_flow(cv2.cvtColor(newFrame, cv2.COLOR_BGR2GRAY), flow))
#cv2.imshow('frame', newFrameROI)
frameCounter = frameCounter + 1
cv2.imshow('frame',draw_flow(newFrameROI,flow))
cv2.waitKey(1)

print(str(coupleCounter))


# Shuffling data before training
# For training
print("\n\n\n###############################\nSHUFFLING MODEL\n")
unified = list(zip(batchFrames, batchSpeeds))
np.random.shuffle(unified)
batchFrames, batchSpeeds = zip(*unified)
# For validation
unified = list(zip(evalFrames, evalSpeeds))
np.random.shuffle(unified)
evalFrames, evalSpeeds = zip(*unified)


# Training model
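The calcOpticalFlowFarneback call itself is truncated in this diff: only the iteration count (nb_iterations), polynomial expansion degree (deg_expansion, i.e. poly_n), Gaussian standard deviation (STD, i.e. poly_sigma) and the flags argument are visible. A sketch of the full call, with assumed values for the hidden pyramid parameters:

import cv2

def dense_flow(prev_grey, next_grey, nb_iterations=2, deg_expansion=5, std=1.3):
    # Per-pixel (dx, dy) displacement between two preprocessed grey frames,
    # used as the CNN input. pyr_scale, levels and winsize are not shown in
    # the commit, so the first three numeric arguments are assumptions.
    return cv2.calcOpticalFlowFarneback(prev_grey, next_grey, None,
                                        0.5,            # pyr_scale (assumed)
                                        3,              # pyramid levels (assumed)
                                        15,             # window size (assumed)
                                        nb_iterations,  # 2 in this commit
                                        deg_expansion,  # poly_n, 5 in this commit
                                        std,            # poly_sigma, 1.3 in this commit
                                        0)              # flags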
@@ -252,13 +263,8 @@ def setupTestModel(inputShape):
# Training batch
index = index + 1
frameCounter = frameCounter + 1
if frameCounter == batchSize or (coupleCounter+1) == videoLengthInFrames:
if frameCounter == batchSize or index==(len(batchSpeeds)-1) :
print("\nWe are at " + str(index) + "\n")
# Choosing some frames to use for evaluation
evalFrames.append(trainBatchFrame.pop(int(batchSize/2)))
evalSpeeds.append(trainBatchSpeed.pop(int(batchSize/2)))
evalFrames.append(trainBatchFrame.pop(int(batchSize/4)))
evalSpeeds.append(trainBatchSpeed.pop(int(batchSize/4)))
# Preparing data
X = np.array(trainBatchFrame)
Y = np.array(trainBatchSpeed)
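Taken together, the main.py changes replace the old per-batch sampling (popping two frames out of every training batch for evaluation) with an up-front split: every validationGap-th optical-flow frame is routed to the evaluation set, and the training and validation sets are then shuffled independently. A sketch of an equivalent split, assuming flows and speeds are parallel lists built by the frame loop:

import numpy as np

def split_train_eval(flows, speeds, validation_fraction=0.15):
    # Route every gap-th sample to the evaluation set, mirroring the
    # frameCounter == validationGap check in the main loop.
    gap = int(len(flows) / int(len(flows) * validation_fraction))
    train, evaluation = [], []
    for i, sample in enumerate(zip(flows, speeds), start=1):
        (evaluation if i % gap == 0 else train).append(sample)
    # Shuffle each set independently before batching, as the commit does.
    np.random.shuffle(train)
    np.random.shuffle(evaluation)
    train_x, train_y = zip(*train)
    eval_x, eval_y = zip(*evaluation)
    return list(train_x), list(train_y), list(eval_x), list(eval_y)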
26 changes: 26 additions & 0 deletions plotter.py
@@ -0,0 +1,26 @@
import matplotlib.pyplot as plt



file = open("./sourceData/train.txt")
speedTruthArrayString = file.readlines()
speedTruthArray = []
for numeric_string in speedTruthArrayString:
numeric_string = numeric_string.strip('\n')
speedTruthArray.append(float(numeric_string))
file.close()


plt.plot(speedTruthArray, label = "Ground truth speeds")
# naming the x axis
plt.xlabel('Frame')
# naming the y axis
plt.ylabel('Speed')
# giving a title to my graph
plt.title('Speed per frame chart')

# show a legend on the plot
plt.legend()

# function to show the plot
plt.show()
6 changes: 3 additions & 3 deletions testing.py
@@ -8,7 +8,7 @@

# Reading all the speed ground truths
print("Reading speed ground truths")
file = open("./sourceData/train.txt")
file = open("./sourceData/test.txt")
speedTruthArrayString = file.readlines()
speedTruthArray = []
for numeric_string in speedTruthArrayString:
@@ -18,13 +18,13 @@
print("Read " + str(len(speedTruthArray)) + " values")

# Loading the Keras trained model
model = load_model('./savedModels/greyMaskSimple_15epochs_32batch_500section.h5')
model = load_model('./savedModels/greyMask.h5')
model.compile(Adam(lr=0.001),
loss="mse",
metrics=["mse"])

# Opening testing video
videoFeed = cv2.VideoCapture('./sourceData/train.mp4')
videoFeed = cv2.VideoCapture('./sourceData/test.mp4')
videoLengthInFrames = int(videoFeed.get(cv2.CAP_PROP_FRAME_COUNT))
print(videoLengthInFrames)

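The evaluation loop that follows is collapsed in this diff. Assuming testing mirrors the training preprocessing (elaborateImage plus the same Farneback flow, e.g. the dense_flow sketch above) and that the model maps one flow image to one speed, the continuation is roughly:

import numpy as np  # presumably already imported in testing.py

# Hypothetical continuation; the actual loop is not shown in this commit.
predictions = []
ret, oldFrame = videoFeed.read()
oldROI = elaborateImage(oldFrame)
while True:
    ret, newFrame = videoFeed.read()
    if not ret:
        break
    newROI = elaborateImage(newFrame)
    flow = dense_flow(oldROI, newROI)   # same optical-flow step as training
    x = np.expand_dims(flow, axis=0)    # the model expects a batch dimension
    predictions.append(float(model.predict(x)[0]))
    oldROI = newROI

# Compare against the ground truths read from test.txt
errors = np.array(predictions) - np.array(speedTruthArray[:len(predictions)])
print("MSE on test video: " + str(np.mean(errors ** 2)))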
