diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 8bd7d2b..0000000
--- a/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-inline-quotes = "
-indent-size = 4
diff --git a/camdetect.py b/camdetect.py
index 3266934..b66dd01 100755
--- a/camdetect.py
+++ b/camdetect.py
@@ -116,7 +116,6 @@
 gauge_queue_frames.labels("hold").set(0)
 gauge_queue_frames.labels("upload").set(0)
 counter_frames.labels("motion").inc(0)
-
 assert SLIDE_WINDOW <= 8 # This is 256 frames which should be enough
 
 
@@ -260,22 +259,20 @@ async def motion_detector(reference_frame, download_queue, upload_queue):
         # Handle event start
         if motion_detected and not event_id:
            result = await app.ctx.coll.find_one_and_update({
-                    "timestamp": dt, # TODO: Account for clock skew
+                "timestamp": dt, # TODO: Account for clock skew
+                "source": SOURCE_NAME,
+            }, {
+                "$setOnInsert": {
+                    "timestamp": dt,
                     "source": SOURCE_NAME,
-                }, {
-                    "$setOnInsert": {
-                        "timestamp": dt,
-                        "source": SOURCE_NAME,
-                        "event": "motion-detected",
-                        "started": dt,
-                        "finished": dt + timedelta(minutes=2),
-                        "component": "camdetect",
-                        "screenshots": [],
-                        "action": "event",
-                    }
-                },
-                upsert = True,
-                return_document=ReturnDocument.AFTER)
+                    "event": "motion-detected",
+                    "started": dt,
+                    "finished": dt + timedelta(minutes=2),
+                    "component": "camdetect",
+                    "screenshots": [],
+                    "action": "event",
+                }
+            }, upsert=True, return_document=ReturnDocument.AFTER)
            app.ctx.event_id = event_id = result["_id"]
 
         # Handle buffering frames prior event start
@@ -375,7 +372,7 @@ async def download(resp, queue):
             continue
         else:
             # Assemble JPEG blob
-            blob = buf + data[:marker+2]
+            blob = buf + data[:marker + 2]
 
             # Parse DCT coefficients
             try:
@@ -394,7 +391,7 @@ async def download(resp, queue):
             except asyncio.QueueFull:
                 counter_dropped_frames.labels("download").inc()
             gauge_queue_frames.labels("download").set(queue.qsize())
-            data = data[marker+2:]
+            data = data[marker + 2:]
             buf = b""
             counter_receive_frames.inc()
 
@@ -446,8 +443,8 @@ async def bypass_stream_wrapper(request):
             counter_transmit_bytes.inc(len(data))
             counter_transmit_frames.inc()
     return response.stream(
-            stream_camera,
-            content_type="multipart/x-mixed-replace; boundary=frame")
+        stream_camera,
+        content_type="multipart/x-mixed-replace; boundary=frame")
 
 
 @app.route("/debug")
@@ -467,8 +464,8 @@ async def stream_wrapper(request):
         for y in range(0, app.ctx.mask.shape[0]):
             for x in range(0, app.ctx.mask.shape[1]):
                 if app.ctx.mask[y][x] > 0:
-                    img[y*DCT_BLOCK_SIZE:(y+1)*DCT_BLOCK_SIZE,
-                        x*DCT_BLOCK_SIZE:(x+1)*DCT_BLOCK_SIZE,
+                    img[y * DCT_BLOCK_SIZE:(y + 1) * DCT_BLOCK_SIZE,
+                        x * DCT_BLOCK_SIZE:(x + 1) * DCT_BLOCK_SIZE,
                         channel] = 255
 
         # Compress modified frame as JPEG frame
@@ -490,9 +487,9 @@ async def ready_check(request):
     # Check if S3 is accessible
     session = aioboto3.Session()
     async with session.resource("s3",
-            aws_access_key_id=AWS_ACCESS_KEY_ID,
-            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
-            endpoint_url=S3_ENDPOINT_URL) as s3:
+        aws_access_key_id=AWS_ACCESS_KEY_ID,
+        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
+        endpoint_url=S3_ENDPOINT_URL) as s3:
         bucket = await s3.Bucket(S3_BUCKET_NAME)
         await bucket.upload_fileobj(io.BytesIO(b"test"), "test")