Guest User

Untitled

a guest
Mar 15th, 2020
914
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 4.78 KB | None | 0 0
import argparse
import json
import os
import time
from urllib.request import Request, urlopen

import cv2
import imutils
import numpy as np
import paho.mqtt.client as mqtt

from distanceDetect import distanceToCamera
  10.  
  11. mqttc = mqtt.Client()
  12. mqttc.connect("test.mosquitto.org", 1883, 60)
  13.  
  14. # def getOutputsNames(net):
  15. #     # Get the names of all the layers in the network
  16. #     layersNames = net.getLayerNames()
  17. #     # Get the names of the output layers, i.e. the layers with unconnected outputs
  18. #     return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
  19.  
  20. labelsPath = 'yolo/obj.names' #path to .names file
  21. LABELS = open(labelsPath).read().strip().split("\n")
  22.  
  23. # initialize a list of colors to represent each possible class label
  24. np.random.seed(42)
  25. COLOURS = np.random.randint(0, 255, size=(len(LABELS), 3),
  26.     dtype="uint8")
  27.  
  28.  
  29. weightsPath = 'yolo/yolov3-tiny-obj_2000.weights' #path to yolo weights file (2000?)
  30. configPath = 'yolo/yolov3-tiny-obj.cfg' #path to cfg file
  31. print("[INFO] loading YOLO from disk...")
  32.  
  33. net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
  34. ln = net.getLayerNames()
  35. ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
  36. #ln = net.getOu
  37. ## ^^^ this determines teh output layer names from YOLO
  38. streamURL = "http://192.168.0.23:8080/video"
  39. stream = urlopen(streamURL)
  40. bytes = b""
  41. print('open stream')
  42.  
  43. #vs = cv2.VideoCapture("http://937f8731.ngrok.io/stream") #fill in IP camera from ngrok
  44. writer = None
  45. (W, H) = (None, None)
  46.  
  47. minConfidence = 0.60 #some value (scaled 0 - 1)
  48. NMSthreshold = 0.70 #Non maxima supression
  49.  
  50. vs = cv2.VideoCapture(streamURL)
  51.  
  52. while True:
  53.     bytes += stream.read(1024)
  54.     a = bytes.find(b'\xff\xd8')
  55.     b = bytes.find(b'\xff\xd9')
  56.     if a != -1 and b != -1:
  57.         jpg = bytes[a:b+2]
  58.         bytes = bytes[b+2:]
  59.         frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
  60.         print('here')
  61.    
  62.         (grabbed, frame) = vs.read()
  63.         print('we have grabbed frame')
  64.         print(grabbed)
  65.         mqttc.publish("praxis3/test", 'frame')
  66.         print('nani???')
  67.         if not grabbed:
  68.         print('grabbed')
  69.         break
  70.  
  71.         # if the frame dimensions are empty, grab them
  72.        
  73.        
  74.        
  75.        
  76.         if W is None or H is None:
  77.             (W, H) = frame.shape[:2]
  78.         #print('frame grabbed')
  79.         #preprocess the image using blobFromImage
  80.         blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
  81.         #feed forward through the network
  82.         net.setInput(blob)
  83.         start = time.time()
  84.         layerOutputs = net.forward(ln)
  85.         end = time.time()
  86.  
  87.         boxes = []
  88.         boxCenter = []
  89.         confidences = []
  90.         classIDs = []
  91.  
  92.         # loop over each of the layer outputs
  93.         for output in layerOutputs:
  94.             # loop over each of the detections
  95.             for detection in output:
  96.             # extract the class ID and confidence (i.e., probability)
  97.             # of the current object detection
  98.                 scores = detection[5:]
  99.                 classID = np.argmax(scores)
  100.                 confidence = scores[classID]
  101.  
  102.                 # filter out weak predictions by ensuring the detected
  103.                 # probability is greater than the minimum probability
  104.                 if confidence > minConfidence:
  105.                     # scale the bounding box coordinates back relative to
  106.                     # the size of the image, keeping in mind that YOLO
  107.                     # actually returns the center (x, y)-coordinates of
  108.                     # the bounding box followed by the boxes' width and
  109.                     # height
  110.                     box = detection[0:4] * np.array([W, H, W, H])
  111.                     (centerX, centerY, width, height) = box.astype("int")
  112.  
  113.                     # use the center (x, y)-coordinates to derive the top
  114.                     # and and left corner of the bounding box
  115.                     x = int(centerX - (width / 2))
  116.                     y = int(centerY - (height / 2))
  117.  
  118.                     # update our list of bounding box coordinates,
  119.                     # confidences, and class IDs
  120.                     boxes.append([x, y, int(width), int(height)])
  121.                     confidences.append(float(confidence))
  122.                     classIDs.append(classID)
  123.        
  124.         idxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, NMSthreshold)
  125.         print('should be gucci')
  126.         if len(idxs) > 0:
  127.             # loop over the indexes we are keeping
  128.             for i in idxs.flatten():
  129.                 # extract the bounding box coordinates
  130.                 (x, y) = (boxes[i][0], boxes[i][1])
  131.                 (w, h) = (boxes[i][2], boxes[i][3])
  132.                 (cX, cY) = (x + w/2, y + (y/2))
  133.                 sX, sY, sZ = frame.shape
  134.                 rX, rY = cX/sX, cY/sY
  135.                 d = distanceToCamera(w)
  136.                 mqttc.publish("praxis3/test", {'rX': rX, 'rY': rY, 'd': d})
  137.                 mqttc.publish("praxis3/test", 'helloworld ndhfh')
  138.                 mqttc.loop(2)
  139.                 if d > 30.48 :
  140.                     print('ROBOT HAS STOPPED')
  141.                     # load mode 2
  142.                 # draw a bounding box rectangle and label on the frame
  143.                 colour = [int(c) for c in COLOURS[classIDs[i]]]
  144.                 cv2.rectangle(frame, (x, y), (x + w, y + h), colour, 2)
  145.                 text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
  146.                 cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour, 2)
  147.  
  148.         # TODO: Show Images that have been bound!
Advertisement
Add Comment
Please, Sign In to add comment