"""Run a trained Keras segmentation model frame-by-frame over a video file.

Loads the model once, then reads frames with OpenCV, resizes each frame to
the network's input resolution (1150x674), runs ``model.predict`` on a
batch of one, and displays both the raw frame and the predicted frame.
Per-stage timings are printed. Press 'q' to quit early; the loop also
terminates cleanly when the video stream is exhausted.
"""
from keras import models
import keras
import cv2
import numpy as np
import tensorflow as tf
import time

# Cap TensorFlow at 50% of GPU memory so the GPU can be shared with other
# processes (alternative: allow_growth, which grows allocation on demand).
config = tf.compat.v1.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf.compat.v1.Session(config=config)

modelname = 'simple_unet_A'
model_path = '../trained_models/' + modelname

videopath = "/home/mounchili/vid_ab.mp4"
cap = cv2.VideoCapture(videopath)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
n_frame = 0

# Time the model load separately from per-frame inference.
start_load = time.time()
model = models.load_model(model_path)
elapsed_load = time.time() - start_load
print("the model needs {:.2f}s for loading".format(elapsed_load))
print('Successfully reconstructed model. Start reading the video file...')

while True:
    ret, frame = cap.read()
    if not ret:
        # End of stream (or read error). The original looped forever here
        # because the quit check was inside the ret branch; break instead
        # so cap.release()/destroyAllWindows() below are actually reached.
        break

    cv2.imshow("frame", frame)

    # Resize to the network input size and add a leading batch axis of 1.
    # NOTE(review): frame is fed as uint8 BGR; assumes the model handles
    # that dtype/channel order itself — confirm against training pipeline.
    img_resized = cv2.resize(frame, (1150, 674))
    img = np.expand_dims(img_resized, axis=0)
    print(img.shape)

    start_predict = time.time()
    predicted_img = model.predict(img)
    elapsed_predict = time.time() - start_predict
    print("the model needs {:.2f}s for prediction".format(elapsed_predict))

    # predict returns a batch; show the single predicted frame.
    cv2.imshow("predicted frame", predicted_img[0])

    # waitKey pumps the GUI event loop; 'q' quits early.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()