Merge pull request #2 from OpenVisualCloud/master
Merge all changes from master
ttao1 authored Dec 19, 2019
2 parents c2cd3c3 + 83dc5b3 commit 656b4b7
Showing 25 changed files with 20,168 additions and 8,806 deletions.
2 changes: 1 addition & 1 deletion analytics/common/Xeon/gst/Dockerfile.1.gst
@@ -20,7 +20,7 @@ RUN git clone ${VA_GSTREAMER_PLUGINS_REPO} && \
     -DGIT_INFO=$(echo "git_$(git rev-parse --short HEAD)") \
     -DCMAKE_BUILD_TYPE=Release \
     -DDISABLE_SAMPLES=ON \
-    -DDISABLE_VAAPI=ON \
+    -DHAVE_VAAPI=OFF \
     -DENABLE_PAHO_INSTALLATION=1 \
     -DENABLE_RDKAFKA_INSTALLATION=0 \
     -DENABLE_AVX2=ON -DENABLE_SSE42=ON \
101 changes: 70 additions & 31 deletions analytics/common/runva.py
@@ -27,38 +27,77 @@ def __init__(self, pipeline, version="2"):
     def stop(self):
         self._stop=True
 
-    def loop(self, sensor, location, uri, algorithm, topic):
-        pid,msg=PipelineManager.create_instance(self._pipeline,self._version,{
-            "source": {
-                "uri": uri,
-                "type":"uri"
-            },
-            "destination": {
-                "type": "mqtt",
-                "host": mqtthost,
-                "clientid": algorithm,
-                "topic": topic
-            },
-            "tags": {
-                "sensor": sensor,
-                "location": location,
-                "algorithm": algorithm,
-                "office": {
-                    "lat": office[0],
-                    "lon": office[1],
-                },
-            },
-            "destination": {
-                "type": "mqtt",
-                "host": mqtthost,
-                "clientid": algorithm,
-                "topic": topic,
-            },
-            "parameters": {
-                "every-nth-frame": every_nth_frame,
-                "recording_prefix": "/tmp/" + sensor,
-            },
-        })
+    def loop(self, sensor, location, uri, topic, algorithm, algorithmName, resolution={}, zone=0, polygon=[]):
+        if algorithmName=="crowd-counting":
+            pid,msg=PipelineManager.create_instance(self._pipeline,self._version,{
+                "source": {
+                    "uri": uri,
+                    "type":"uri"
+                },
+                "destination": {
+                    "type": "mqtt",
+                    "host": mqtthost,
+                    "clientid": algorithm,
+                    "topic": topic
+                },
+                "tags": {
+                    "sensor": sensor,
+                    "location": location,
+                    "algorithm": algorithm,
+                    "office": {
+                        "lat": office[0],
+                        "lon": office[1],
+                    },
+                },
+                "destination": {
+                    "type": "mqtt",
+                    "host": mqtthost,
+                    "clientid": algorithm,
+                    "topic": topic,
+                },
+                "parameters": {
+                    "crowd_count": {
+                        "zone": zone,
+                        "width": resolution["width"],
+                        "height": resolution["height"],
+                        "polygon": polygon
+                    },
+                    "every-nth-frame": every_nth_frame,
+                    "recording_prefix": "/tmp/" + sensor,
+                }
+            })
+        else:
+            pid,msg=PipelineManager.create_instance(self._pipeline,self._version,{
+                "source": {
+                    "uri": uri,
+                    "type":"uri"
+                },
+                "destination": {
+                    "type": "mqtt",
+                    "host": mqtthost,
+                    "clientid": algorithm,
+                    "topic": topic
+                },
+                "tags": {
+                    "sensor": sensor,
+                    "location": location,
+                    "algorithm": algorithm,
+                    "office": {
+                        "lat": office[0],
+                        "lon": office[1],
+                    },
+                },
+                "destination": {
+                    "type": "mqtt",
+                    "host": mqtthost,
+                    "clientid": algorithm,
+                    "topic": topic,
+                },
+                "parameters": {
+                    "every-nth-frame": every_nth_frame,
+                    "recording_prefix": "/tmp/" + sensor,
+                },
+            })
         if pid is None:
             print("Exception: "+str(msg), flush=True)
             return
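For reference, a minimal sketch of how a caller would drive the new loop() signature. The class name RunVA and all of the sensor, topic, and geometry values below are assumptions for illustration, not taken from this commit; in the services below, loop() is submitted to a ThreadPoolExecutor because it blocks while the pipeline instance runs.

# Hypothetical caller of the extended loop() signature.
va = RunVA("crowd_counting", version="2")         # assumed class name exported by runva.py
va.loop(
    sensor="camera-0001",                         # hypothetical sensor id
    location={"lat": 45.54, "lon": -122.93},      # hypothetical location tag
    uri="rtsp://camera-0001:8554/live.sdp",       # hypothetical RTSP source
    topic="smtc/camera-0001",                     # hypothetical MQTT topic
    algorithm="crowd-counting-va",                # used as MQTT client id and tag
    algorithmName="crowd-counting",               # selects the crowd_count branch above
    resolution={"width": 1024, "height": 768},
    zone=3,
    polygon=[140, 571, 174, 571, 206, 563, 206, 211, 140, 211, 140, 571],
)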
3 changes: 1 addition & 2 deletions analytics/crowd-counting/Xeon/gst/Dockerfile
@@ -1,8 +1,7 @@
 # smtc_analytics_crowd_counting_xeon_gst
 
 FROM smtc_analytics_common_xeon_gst
-RUN apt-get update -qq && apt-get install -qq python3-paho-mqtt python3-ply python3-requests python3-watchdog python3-pip && rm -rf /var/lib/apt/lists/*
-RUN pip3 install Pillow
+RUN apt-get update -qq && apt-get install -qq python3-paho-mqtt python3-ply python3-requests python3-watchdog python3-pillow && rm -rf /var/lib/apt/lists/*
 
 COPY --from=smtc_common /home/*.py /home/
 COPY *.py /home/
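As a quick sanity check that the distro-packaged Pillow still provides what custom_transforms/crowd_counting.py imports, something like the following can be run inside the built image; the invocation path is an assumption, only the image name comes from the Dockerfile header above.

# Run with e.g.: docker run --rm smtc_analytics_crowd_counting_xeon_gst python3 /tmp/check_pillow.py
from PIL import Image, ImageDraw   # the only Pillow APIs the crowd-counting transform uses
import numpy

img = Image.new('L', (128, 96), 0)
ImageDraw.Draw(img).polygon([0, 0, 10, 0, 10, 10], outline=1, fill=1)
print("Pillow OK, mask pixels:", int(numpy.array(img).sum()))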
36 changes: 30 additions & 6 deletions analytics/crowd-counting/Xeon/gst/pipeline/2/pipeline.json
@@ -1,8 +1,8 @@
 {
-  "name": "crowd_counting",
-  "version": 2,
-  "type": "GStreamer",
-  "template":"rtspsrc udp-buffer-size=212992 name=\"source\" ! queue ! rtph264depay ! h264parse ! video/x-h264 ! tee name=t ! queue ! decodebin ! videoconvert name=\"videoconvert\" ! video/x-raw,format=BGRx ! queue leaky=upstream ! gvainference infer-config=CPU_BIND_THREAD=NO model=\"{models[CSRNet_IR_model_2019R3][1][network]}\" model-proc=\"{models[CSRNet_IR_model_2019R3][1][proc]}\" name=\"detection\" ! queue ! gvametaconvert converter=json method=detection include-no-detections=true name=\"metaconvert\" ! gvapython name=\"crowdcounting\" module=\"crowd_counting\" class=\"CrowdCounting\" package=\"custom_transforms\" ! queue ! gvametapublish name=\"destination\" ! appsink name=appsink t. ! queue ! splitmuxsink max-size-time=60500000000 name=\"splitmuxsink\"",
+  "name": "crowd_counting",
+  "version": 2,
+  "type": "GStreamer",
+  "template":"rtspsrc udp-buffer-size=212992 name=\"source\" ! queue ! rtph264depay ! h264parse ! video/x-h264 ! tee name=t ! queue ! decodebin ! videoconvert name=\"videoconvert\" ! video/x-raw,format=BGRx ! queue leaky=upstream ! gvainference infer-config=CPU_BIND_THREAD=NO model=\"{models[CSRNet_IR_model_2019R3][1][network]}\" model-proc=\"{models[CSRNet_IR_model_2019R3][1][proc]}\" name=\"detection\" ! queue ! gvametaconvert converter=json method=detection include-no-detections=true name=\"metaconvert\" ! gvapython name=\"crowdcounting\" module=\"crowd_counting\" class=\"CrowdCounting\" package=\"custom_transforms\" ! queue ! gvametapublish name=\"destination\" ! appsink name=appsink t. ! queue ! splitmuxsink max-size-time=60500000000 name=\"splitmuxsink\"",
   "description": "Crowd Counting Pipeline",
   "source": {
     "type": "object",
@@ -24,9 +24,33 @@
       }
     }
   },
-  "parameters": {
+  "parameters": {
     "type" : "object",
     "properties" : {
+      "crowd_count": {
+        "element": {
+          "name": "crowdcounting",
+          "property": "args"
+        },
+        "type": "object",
+        "properties": {
+          "zone": {
+            "type": "integer"
+          },
+          "width": {
+            "type": "integer"
+          },
+          "height": {
+            "type": "integer"
+          },
+          "polygon": {
+            "type": "array",
+            "items": {
+              "type": "number"
+            }
+          }
+        }
+      },
       "every-nth-frame": {
         "element":"detection",
         "type": "integer",
@@ -56,5 +80,5 @@
       "default":"recording"
     }
   }
-}
+}
 }
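To make the new schema concrete, here is a sketch of a parameters block that matches it. The assumption is that the "element"/"property": "args" mapping forwards the crowd_count object to the gvapython element as keyword arguments for the CrowdCounting class shown further down; the numeric values and the every-nth-frame value are illustrative, while "recording" is the default visible in the last hunk above.

# Illustrative parameters object that validates against the new schema.
parameters = {
    "crowd_count": {
        "zone": 3,
        "width": 1024,
        "height": 768,
        "polygon": [140, 571, 174, 571, 206, 563, 206, 211, 140, 211, 140, 571],
    },
    "every-nth-frame": 6,              # hypothetical value
    "recording_prefix": "recording",   # schema default shown above
}

# Assumption: via the gvapython "args" property this effectively becomes
#   CrowdCounting(zone=3, width=1024, height=768, polygon=[...])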
12 changes: 9 additions & 3 deletions analytics/crowd-counting/count-crowd.py
@@ -24,9 +24,15 @@
 runva=None
 stop=False
 
-def connect(sensor, location, algorithm, uri):
+def connect(sensor, location, uri, algorithm, algorithmName, resolution, zonemap):
     global mqtt2db, rec2db, runva
 
+    print("==============count-crowd:connect:zonemap=",zonemap,"========================",flush=True)
+    flatZonemap = []
+    for sublist in zonemap[0]["polygon"]:
+        for item in sublist:
+            flatZonemap.append(item)
+
     try:
         mqtt2db=MQTT2DB(algorithm) # this waits for mqtt
         rec2db=Rec2DB(sensor)
@@ -39,7 +45,7 @@ def connect(sensor, location, algorithm, uri):
 
     # any VA exit indicates a camera disconnect
     with ThreadPoolExecutor(1) as e1:
-        e1.submit(runva.loop, sensor, location, uri, algorithm, topic)
+        e1.submit(runva.loop, sensor, location, uri, topic, algorithm, algorithmName, resolution, zonemap[0]["zone"],flatZonemap)
 
     if not stop:
         mqtt2db.stop()
@@ -88,7 +94,7 @@ def quit_service(signum, sigframe):
 
                 # stream from the sensor
                 print("Connected to "+sensor["_id"]+"...",flush=True)
-                connect(sensor["_id"],sensor["_source"]["location"],algorithm,sensor["_source"]["url"])
+                connect(sensor["_id"],sensor["_source"]["location"],sensor["_source"]["url"],algorithm,sensor["_source"]["algorithm"],sensor["_source"]["resolution"],sensor["_source"]["zonemap"])
 
                 # if exit, there is somehting wrong
                 r=dbs.update(sensor["_id"],{"status":"disconnected"})
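For clarity, a small sketch of the zonemap structure this code appears to assume (a list of zone records whose polygon is a list of [x, y] points; the coordinates here are hypothetical) and the flat list connect() derives from it:

# Hypothetical sensor zonemap record as read from sensor["_source"]["zonemap"].
zonemap = [{"zone": 3, "polygon": [[140, 571], [174, 571], [206, 563], [206, 211], [140, 211]]}]

# Same flattening as in connect() above.
flatZonemap = []
for sublist in zonemap[0]["polygon"]:
    for item in sublist:
        flatZonemap.append(item)

print(flatZonemap)   # [140, 571, 174, 571, 206, 563, 206, 211, 140, 211]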
62 changes: 25 additions & 37 deletions analytics/crowd-counting/custom_transforms/crowd_counting.py
@@ -5,63 +5,51 @@
 from PIL import Image, ImageDraw
 
 class CrowdCounting:
-    def __init__(self):
-        self.numZone = 8
-        self.mask=[0]*self.numZone
-        self.crowd_count=[0]*self.numZone
-        self.polygon=[0]*self.numZone
+    def __init__(self,zone=0,width=1024,height=768, polygon=[865,210,933,210,933,227,968,227,968,560,934,560,934,568,865,568,865,210]):
+        print("===========CrowdCounting:__init__:zone,width,height,polygon=", zone, width, height, polygon, "================")
 
         #self._sensor = sensor
-        self.polygon[0] = [865,210,933,210,933,227,968,227,968,560,934,560,934,568,865,568,865,210]
-        self.polygon[1] = [830,49,861,49,893,56,922,71,946,93,960,122,967,151,967,228,934,228,934,211,899,211,867,209,864,183,854,165,836,149,814,144,759,144,759,114,795,114,795,84,830,83,830,49]
-        self.polygon[2] = [259,49,259,82,277,82,277,114,323,114,323,146,760,146,760,114,796,114,796,82,832,82,831,49,259,49]
-        self.polygon[3] = [259,49,259,82,277,82,277,114,322,114,322,144,269,144,246,146,226,156,212,173,204,190,204,212,174,212,172,214,143,214,143,161,157,127,182,103,214,87,231,83,230,49,259,49]
-        self.polygon[4] = [140,571,174,571,206,563,206,211,140,211,140,571]
-        self.polygon[5] = [206,563,174,569,142,569,142,599,158,630,182,654,212,668,242,673,298,672,298,644,326,644,326,612,271,612,248,609,227,600,215,583,206,563]
-        self.polygon[6] = [762,611,762,642,788,642,788,672,811,672,811,704,261,704,261,672,298,672,298,642,325,642,325,611,762,611]
-        self.polygon[7] = [966,561,966,586,964,615,954,646,933,676,900,695,866,702,810,702,788,674,788,644,762,644,762,611,817,611,840,604,857,587,868,566,896,574,901,567,933,567,933,561,966,561]
+        # self.polygons=[0]*8
+        # self.polygons[0] = [865,210,933,210,933,227,968,227,968,560,934,560,934,568,865,568,865,210]
+        # self.polygons[1] = [830,49,861,49,893,56,922,71,946,93,960,122,967,151,967,228,934,228,934,211,899,211,867,209,864,183,854,165,836,149,814,144,759,144,759,114,795,114,795,84,830,83,830,49]
+        # self.polygons[2] = [259,49,259,82,277,82,277,114,323,114,323,146,760,146,760,114,796,114,796,82,832,82,831,49,259,49]
+        # self.polygons[3] = [259,49,259,82,277,82,277,114,322,114,322,144,269,144,246,146,226,156,212,173,204,190,204,212,174,212,172,214,143,214,143,161,157,127,182,103,214,87,231,83,230,49,259,49]
+        # self.polygons[4] = [140,571,174,571,206,563,206,211,140,211,140,571]
+        # self.polygons[5] = [206,563,174,569,142,569,142,599,158,630,182,654,212,668,242,673,298,672,298,644,326,644,326,612,271,612,248,609,227,600,215,583,206,563]
+        # self.polygons[6] = [762,611,762,642,788,642,788,672,811,672,811,704,261,704,261,672,298,672,298,642,325,642,325,611,762,611]
+        # self.polygons[7] = [966,561,966,586,964,615,954,646,933,676,900,695,866,702,810,702,788,674,788,644,762,644,762,611,817,611,840,604,857,587,868,566,896,574,901,567,933,567,933,561,966,561]
+
+        self.zone = zone
+        self.mask=[0]
+        self.crowd_count=0
+        self.polygon=polygon
 
         #no matter what resolution the input video is (currently 720x1280),
         #it will resize to 1024x768 before sending to model
         #AI data input is 1/8 of image resolution, 768x1024 image, data is 1x96x128x1
-        self.width = 1024>>3
-        self.height = 768>>3
-        for zone in range(self.numZone):
-            for t in range(len(self.polygon[zone])):
-                self.polygon[zone][t] = self.polygon[zone][t]>>3
+        self.width = width>>3
+        self.height = height>>3
+        for t in range(len(self.polygon)):
+            self.polygon[t] = self.polygon[t]>>3
 
         #convert polygon to mask algorithm
         #https://stackoverflow.com/questions/3654289/scipy-create-2d-polygon-mask
-        for zone in range(self.numZone):
-            self.img = Image.new('L', (self.width, self.height), 0)
-            ImageDraw.Draw(self.img).polygon(self.polygon[zone], outline=1, fill=1)
-            self.mask[zone] = numpy.array(self.img).flatten()
+        self.img = Image.new('L', (self.width, self.height), 0)
+        ImageDraw.Draw(self.img).polygon(self.polygon, outline=1, fill=1)
+        self.mask = numpy.array(self.img).flatten()
 
     def process_frame(self, frame):
         for tensor in frame.tensors():
             data = tensor.data()
             imgData = []
             imgData.append(tensor.data())
-            for zone in range(self.numZone):
-                self.crowd_count[zone] = numpy.sum(self.mask[zone] * imgData)
+            self.crowd_count = numpy.sum(self.mask * imgData)
 
         if (self.crowd_count):
             messages = list(frame.messages())
             if len(messages) > 0:
                 json_msg = json.loads(messages[0].get_message())
-                json_msg["count"] = {
-                    "zone0":int(self.crowd_count[0]),
-                    "zone1":int(self.crowd_count[1]),
-                    "zone2":int(self.crowd_count[2]),
-                    "zone3":int(self.crowd_count[3]),
-                    "zone4":int(self.crowd_count[4]),
-                    "zone5":int(self.crowd_count[5]),
-                    "zone6":int(self.crowd_count[6]),
-                    "zone7":int(self.crowd_count[7])
-                }
+                json_msg["count"] = {"zone"+str(self.zone):int(self.crowd_count)}
                 messages[0].set_message(json.dumps(json_msg))
             else:
                 print("No JSON messages in frame")
 
         return True
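The zone count itself is just a masked sum over the density map: the polygon is rasterized to a 0/1 mask on the model's 1/8-resolution grid and multiplied against the flattened tensor. A self-contained sketch of that technique follows; the polygon is one of the zone shapes above, and random data stands in for the gvainference output.

import numpy
from PIL import Image, ImageDraw

# Zone polygon in 1024x768 model-input coordinates (x0,y0,x1,y1,...).
polygon = [865, 210, 933, 210, 933, 227, 968, 227, 968, 560,
           934, 560, 934, 568, 865, 568, 865, 210]

# The density output is 1/8 of the 1024x768 input, so shrink grid and polygon alike.
width, height = 1024 >> 3, 768 >> 3
polygon = [p >> 3 for p in polygon]

# Rasterize the polygon into a flat 0/1 mask (the Stack Overflow trick cited above).
img = Image.new('L', (width, height), 0)
ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)
mask = numpy.array(img).flatten()

# Stand-in for the flattened CSRNet density tensor delivered per frame.
density = numpy.random.rand(width * height)

# People inside the zone = sum of density values covered by the mask.
print(int(numpy.sum(mask * density)))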
@@ -2,7 +2,7 @@
 "name": "object_detection",
 "version": 2,
 "type": "GStreamer",
-"template":"rtspsrc udp-buffer-size=212992 name=source ! queue ! rtph264depay ! h264parse ! video/x-h264 ! tee name=t ! queue ! decodebin ! videoconvert name=\"videoconvert\" ! video/x-raw,format=BGRx ! queue leaky=upstream ! gvadetect infer-config=CPU_BIND_THREAD=NO model=\"{models[mobilenet-ssd_2019R3][1][network]}\" model-proc=\"{models[mobilenet-ssd_2019R3][1][proc]}\" name=\"detection\" ! gvametaconvert converter=json method=detection name=\"metaconvert\" ! queue ! gvametapublish name=\"destination\" ! appsink name=appsink t. ! queue ! splitmuxsink max-size-time=60500000000 name=\"splitmuxsink\"",
+"template":"rtspsrc udp-buffer-size=212992 name=source ! queue ! rtph264depay ! h264parse ! video/x-h264 ! tee name=t ! queue ! decodebin ! videoconvert name=\"videoconvert\" ! video/x-raw,format=BGRx ! queue leaky=upstream ! gvadetect infer-config=CPU_BIND_THREAD=NO model=\"{models[person_detection_2019R3][1][network]}\" model-proc=\"{models[person_detection_2019R3][1][proc]}\" name=\"detection\" ! gvametaconvert converter=json method=detection name=\"metaconvert\" ! queue ! gvametapublish name=\"destination\" ! appsink name=appsink t. ! queue ! splitmuxsink max-size-time=60500000000 name=\"splitmuxsink\"",
 "description": "Object Detection Pipeline",
 "parameters": {
   "type" : "object",
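The {models[...]} placeholders in the template are resolved by the pipeline server from its model manifest; the hunk above only swaps which model key is looked up. A rough sketch of the substitution mechanics, assuming Python str.format-style lookup and with purely hypothetical paths:

# Hypothetical model manifest entry; real paths depend on how the image lays out its models.
models = {
    "person_detection_2019R3": {
        1: {
            "network": "/home/models/person_detection_2019R3/1/person-detection.xml",
            "proc": "/home/models/person_detection_2019R3/1/person-detection.json",
        }
    }
}
fragment = ('gvadetect model="{models[person_detection_2019R3][1][network]}" '
            'model-proc="{models[person_detection_2019R3][1][proc]}"')
print(fragment.format(models=models))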
8 changes: 3 additions & 5 deletions analytics/object-detection/detect-object.py
@@ -23,7 +23,7 @@
 myAlgorithm=""
 version=0
 
-def connect(sensor, location, algorithm, uri):
+def connect(sensor, location, uri, algorithm, algorithmName):
     global mqtt2db, rec2db, runva
 
     try:
@@ -38,7 +38,7 @@ def connect(sensor, location, algorithm, uri):
 
     # any VA exit indicates a camera disconnect
     with ThreadPoolExecutor(1) as e1:
-        e1.submit(runva.loop, sensor, location, uri, algorithm, topic)
+        e1.submit(runva.loop, sensor, location, uri, topic, algorithm, algorithmName)
 
     if not stop:
         mqtt2db.stop()
@@ -87,16 +87,14 @@ def quit_service(signum, sigframe):
 while not stop:
     try:
         print("Searching...", flush=True)
-        print("sensor:'camera' and status:'idle' and algorithm='queue-counting' and office:["+str(office[0])+","+str(office[1])+"]")
+        print("sensor:'camera' and status:'idle' and algorithm='"+myAlgorithm+"' and office:["+str(office[0])+","+str(office[1])+"]")
        for sensor in dbs.search("sensor:'camera' and status:'idle' and algorithm='"+myAlgorithm+"' and office:["+str(office[0])+","+str(office[1])+"]"):
            try:
                # compete (with other va instances) for a sensor
                r=dbs.update(sensor["_id"],{"status":"streaming"},seq_no=sensor["_seq_no"],primary_term=sensor["_primary_term"])

                # stream from the sensor
                print("Connected to "+sensor["_id"]+"...",flush=True)
-               connect(sensor["_id"],sensor["_source"]["location"],algorithm,sensor["_source"]["url"])
+               connect(sensor["_id"],sensor["_source"]["location"],sensor["_source"]["url"],algorithm,sensor["_source"]["algorithm"])

                # if exit, there is somehting wrong
                r=dbs.update(sensor["_id"],{"status":"disconnected"})
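The change above replaces the hard-coded algorithm='queue-counting' in the debug print with the service's own myAlgorithm, so the printed string now matches what is actually searched. A worked example with hypothetical values:

# Hypothetical values for illustration only.
myAlgorithm = "object-detection"
office = [45.539626, -122.929569]

query = ("sensor:'camera' and status:'idle' and algorithm='" + myAlgorithm +
         "' and office:[" + str(office[0]) + "," + str(office[1]) + "]")
print(query)
# sensor:'camera' and status:'idle' and algorithm='object-detection' and office:[45.539626,-122.929569]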