Python: execute a statement if an event occurs for 60 seconds























I have this OpenCV Python code for motion detection. A rectangle outlines any detected motion. Now I want to execute a statement (e.g. print("you are standing in the frame for more than a minute")) only when a person has been standing in the frame for more than 60 seconds; otherwise the loop should run normally. This is my code.



# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])

# initialize the first frame in the video stream
firstFrame = None

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
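
One way to add this timing (a minimal sketch, not a tested drop-in; the variable names occupied_since and alerted are my own additions, not part of the original code): record the wall-clock time when the room first becomes occupied, reset it whenever the frame goes back to "Unoccupied", and print the message once more than 60 seconds have elapsed.

import time

# before the while-loop: state for the occupancy timer (assumed names)
occupied_since = None   # time.time() value when occupancy began, or None
alerted = False         # so the message prints only once per occupancy

# inside the while-loop, after the contour loop has set `text`:
if text == "Occupied":
    if occupied_since is None:
        # occupancy just started on this frame
        occupied_since = time.time()
    elif not alerted and time.time() - occupied_since > 60:
        print("you are standing in the frame for more than a minute")
        alerted = True
else:
    # room is empty again, reset the timer
    occupied_since = None
    alerted = False

Using time.time() keeps the check independent of the frame rate; counting frames would only work if the capture rate were known and constant.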









      python python-3.x opencv logic






asked Nov 13 at 12:39 by Shabdhu, edited Nov 13 at 12:54 by api55