# Built on Michelle's download script: https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/977a934e1eef18f6b6152da430ac83ba6f7bd30f/download_jiggins_subset.py
# with a modified version of David's redo loop: https://github.com/Imageomics/data-fwg/blob/anomaly-data-challenge/HDR-anomaly-data-challenge/notebooks/download_images.ipynb
# plus expanded logging and file checks, and a checksum calculation over all downloaded images at the end.
# Script to download Jiggins images from any of the master CSV files.
# Generates a checksum file for all images downloaded (<master filename>_checksums.csv).
# Logs image downloads and failures in JSON files (<master filename>_log.json & <master filename>_error_log.json).
# Record numbers and response codes are logged as strings, not int64, so they serialize cleanly to JSON.
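#
# Example invocation (the script and CSV filenames here are illustrative):
#   python download_jiggins_subset.py --csv Jiggins_Zenodo_Master.csv --output jiggins_images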
import requests
import shutil
import json
import pandas as pd
from checksum import get_checksums
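# get_checksums is a local helper (checksum.py, alongside this script); as called
# below, it walks the image folder and writes per-file checksums to a CSV.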
from tqdm import tqdm
import os
import sys
import time
import argparse
EXPECTED_COLS = ["CAMID",
"X",
"Image_name",
"file_url",
"Taxonomic_Name",
"record_number",
"Dataset"
]
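# HTTP status codes treated as transient and worth retrying:
# 429 (rate limited) and common 5xx server errors (500, 502, 503, 504).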
REDO_CODE_LIST = [429, 500, 502, 503, 504]
# If the download gets interrupted, set this to the last completed index before
# restarting: earlier rows are still scanned (already-downloaded files are
# skipped), but their duplicate-image warnings are suppressed.
STARTING_INDEX = 0
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--csv", required=True, help="Path to CSV file with urls.", nargs="?")
parser.add_argument("--output", required=True, help="Main directory to download images into.", nargs="?")
return parser.parse_args()
def log_response(log_data, index, image, url, record_number, dataset, cam_id, response_code):
    # Record one download attempt; record_number and response_code are cast to
    # str because int64 values are not JSON-serializable.
    log_data[index] = {
        "Image": image,
        "file_url": url,
        "record_number": str(record_number),
        "dataset": dataset,
        "CAMID": cam_id,
        "Response_status": str(response_code)
    }
    return log_data
def update_log(log, index, filepath):
    # append this entry (one pretty-printed JSON object) to the log file
with open(filepath, "a") as log_file:
json.dump(log[index], log_file, indent = 4)
log_file.write("\n")
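# Note: because update_log appends one pretty-printed object per call, the log
# files are a stream of JSON objects rather than a single JSON document; a
# reader would need, e.g., json.JSONDecoder().raw_decode in a loop to parse them.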
def download_images(jiggins_data, image_folder, log_filepath, error_log_filepath):
log_data = {}
log_errors = {}
    for i in tqdm(range(len(jiggins_data))):
        # species is really <Genus> <species> ssp. <subspecies>, where a subspecies is indicated
        species = jiggins_data["Taxonomic_Name"][i]
        image_name = str(jiggins_data["X"][i]) + "_" + jiggins_data["Image_name"][i]
record_number = jiggins_data["record_number"][i]
        # download the image from its url if not already downloaded
        # (image_name is unique: <X>_<Image_name>, so everything in the CSV is
        # attempted unless the download was restarted)
        if not os.path.exists(f"{image_folder}/{species}/{image_name}"):
            # get the image url and metadata
            url = jiggins_data["file_url"][i]
            dataset = jiggins_data["Dataset"][i]
            cam_id = jiggins_data["CAMID"][i]
            # download the image, retrying transient failures up to max_redos times
redo = True
max_redos = 2
while redo and max_redos > 0:
                try:
                    response = requests.get(url, stream=True)
                except Exception as e:
                    # Connection-level failure: retry until attempts run out,
                    # then log the exception message in place of a status code.
                    max_redos -= 1
                    if max_redos <= 0:
                        log_errors = log_response(log_errors,
                                                  index=i,
                                                  image=species + "/" + image_name,
                                                  url=url,
                                                  record_number=record_number,
                                                  dataset=dataset,
                                                  cam_id=cam_id,
                                                  response_code=str(e))
                        update_log(log=log_errors, index=i, filepath=error_log_filepath)
                        break
                    continue  # response is undefined here, so skip the status checks below
if response.status_code == 200:
redo = False
                    # log status
                    log_data = log_response(log_data,
                                            index=i,
                                            image=species + "/" + image_name,
                                            url=url,
                                            record_number=record_number,
                                            dataset=dataset,
                                            cam_id=cam_id,
                                            response_code=response.status_code)
                    update_log(log=log_data, index=i, filepath=log_filepath)
                    # create the species-appropriate folder if necessary
                    # (exist_ok=True avoids a race between an existence check and creation)
                    os.makedirs(f"{image_folder}/{species}", exist_ok=True)
# save image to appropriate folder
with open(f"{image_folder}/{species}/{image_name}", "wb") as out_file:
shutil.copyfileobj(response.raw, out_file)
                # transient failure (rate limit or server error): wait and retry
                elif response.status_code in REDO_CODE_LIST:
                    max_redos -= 1
                    if max_redos <= 0:
                        log_errors = log_response(log_errors,
                                                  index=i,
                                                  image=species + "/" + image_name,
                                                  url=url,
                                                  record_number=record_number,
                                                  dataset=dataset,
                                                  cam_id=cam_id,
                                                  response_code=response.status_code)
                        update_log(log=log_errors, index=i, filepath=error_log_filepath)
                    else:
                        time.sleep(1)
                else:  # permanent failure, e.g., 404
                    redo = False
                    log_errors = log_response(log_errors,
                                              index=i,
                                              image=species + "/" + image_name,
                                              url=url,
                                              record_number=record_number,
                                              dataset=dataset,
                                              cam_id=cam_id,
                                              response_code=response.status_code)
                    update_log(log=log_errors, index=i, filepath=error_log_filepath)
del response
else:
if i > STARTING_INDEX:
# No need to print if download is restarted due to interruption (set STARTING_INDEX accordingly).
print(f"duplicate image: {jiggins_data['X']}, {jiggins_data['Image_name']}, from record {record_number}")
return
def main():
    # get arguments from commandline
    args = parse_args()
    csv_path = args.csv         # path to the CSV with urls to download images from
    image_folder = args.output  # folder the dataset will be downloaded into
    # log file locations (same folder as the source CSV); os.path.splitext is used
    # instead of split(".") so paths containing other dots are handled correctly
    csv_stem = os.path.splitext(csv_path)[0]
    log_filepath = csv_stem + "_log.json"
    error_log_filepath = csv_stem + "_error_log.json"
    # load csv
jiggins_data = pd.read_csv(csv_path, low_memory = False)
    # Check for required columns
    missing_cols = [col for col in EXPECTED_COLS if col not in jiggins_data.columns]
    if missing_cols:
        sys.exit(f"The CSV is missing column(s): {missing_cols}")
    # download images from urls
download_images(jiggins_data, image_folder, log_filepath, error_log_filepath)
    # generate checksums and save the CSV to the same folder as the CSV used for download
    checksum_path = csv_stem + "_checksums.csv"
get_checksums(image_folder, checksum_path)
print(f"Images downloaded from {csv_path} to {image_folder}.")
print(f"Checksums recorded in {checksum_path} and download logs are in {log_filepath} and {error_log_filepath}.")
return
if __name__ == "__main__":
main()