Computer Vision News - August 2022
Detect Graphic Intensity and Power in Videos

while count < tam:
    video_name = vid_names[count]
    image_batch = np.zeros(shape=shape, dtype=np.float16)
    image_batch = get_frames(in_dir, video_name)

    # Note that we use 16-bit floating points to save memory.
    shape = (_images_per_file, transfer_values_size)
    transfer_values = np.zeros(shape=shape, dtype=np.float16)
    transfer_values = \
        image_model_transfer.predict(image_batch)

    labels1 = labels[count]
    aux = np.ones([20, 2])
    labelss = labels1 * aux

    yield transfer_values, labelss
    count += 1

Functions to save the transfer values from VGG16 for later use

We are going to define functions that get the transfer values from VGG16 for a given number of files, then save the transfer values used for training in one file and the ones used for testing in another.

def make_files(n_files):
    gen = proces_transfer(names_training, in_dir, labels_training)
    numer = 1

    # Read the first chunk to get the column dtypes
    chunk = next(gen)
    row_count = chunk[0].shape[0]
    row_count2 = chunk[1].shape[0]

    with h5py.File('prueba.h5', 'w') as f:
        # Initialize resizable datasets to hold the output
        maxshape = (None,) + chunk[0].shape[1:]
        maxshape2 = (None,) + chunk[1].shape[1:]
        dset = f.create_dataset('data', shape=chunk[0].shape, maxshape=maxshape,
                                chunks=chunk[0].shape, dtype=chunk[0].dtype)
        dset2 = f.create_dataset('labels', shape=chunk[1].shape, maxshape=maxshape2,
                                 chunks=chunk[1].shape, dtype=chunk[1].dtype)

        # Write the first chunk of rows
        dset[:] = chunk[0]
        dset2[:] = chunk[1]

        for chunk in gen:
            if numer == n_files:
                break

            # Resize the datasets to accommodate the next chunk of rows
            dset.resize(row_count + chunk[0].shape[0], axis=0)
            dset2.resize(row_count2 + chunk[1].shape[0], axis=0)

            # Write the next chunk
            dset[row_count:] = chunk[0]
            dset2[row_count2:] = chunk[1]

            # Increment the row counts and the file counter
            row_count += chunk[0].shape[0]
            row_count2 += chunk[1].shape[0]
            numer += 1
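Once make_files has filled the HDF5 file, the cached transfer values can be read back before training the temporal model. The short sketch below is only an illustration, not part of the article's code: it assumes the file and dataset names used above ('prueba.h5', 'data', 'labels'), and the helper name load_transfer_values is hypothetical.

import h5py
import numpy as np

def load_transfer_values(h5_path='prueba.h5'):
    # Open the HDF5 file written by make_files() and read back the
    # cached VGG16 transfer values together with their per-frame labels.
    with h5py.File(h5_path, 'r') as f:
        transfer_values = np.array(f['data'])    # (n_frames, transfer_values_size)
        labels = np.array(f['labels'])           # (n_frames, 2), assumed two-class labels
    return transfer_values, labels

# Example usage:
# X_train, y_train = load_transfer_values('prueba.h5')

Loading everything into memory like this is fine for a small dataset; for larger ones, slices can also be read lazily straight from the open h5py.File object instead of converting the whole dataset to a NumPy array.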