# -*- coding: utf-8 -*-
"""Untitled3.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1OPk27uLuoRSbYWcNdKG0Qtq0Ffb9RrcT
"""

import re

import requests
import pandas as pd
from bs4 import BeautifulSoup

urls = ['https://nascent.colorado.edu/samples/all_samples',
        'https://nascent.colorado.edu/datasets']

for url in urls:
    html = requests.get(url).content
    dfs = pd.read_html(html)

    # Promote the first row to the header, then cast every cell to string
    dfs[0].columns = dfs[0].iloc[0]
    dfs[0] = dfs[0].iloc[1:].astype(str)

    # Pretty-print every table found on the page
    for i, df in enumerate(dfs):
        print(f"{i}: {df.shape}")
        print(df.head())

    # Write the main table to a parquet file, named after the URL's last path segment
    dfs[0].to_parquet(f"{url.split('/')[-1]}.parquet")
    # Make a csv version while we're here
    dfs[0].to_csv(f"{url.split('/')[-1]}.csv")

# Scrape this website
# https://nascent.colorado.edu/
# TODO once we pick out some samples of interest

response = requests.get('https://nascent.colorado.edu/samples/SRZ7741175')
soup = BeautifulSoup(response.content, 'html.parser')

# Collect every link whose href ends in ".bed" (the dot must be escaped in the regex)
bed_files = soup.find_all('a', href=re.compile(r'\.bed$'))
for bed_file in bed_files:
    print(bed_file['href'])
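
# A minimal sketch of the TODO above: downloading the discovered .bed files
# once samples of interest are picked out. This is an assumption about the
# next step, not part of the original notebook. urljoin handles both relative
# and absolute hrefs; the local filename is simply the URL's last path segment.
from urllib.parse import urljoin

for bed_file in bed_files:
    bed_url = urljoin(response.url, bed_file['href'])
    filename = bed_url.split('/')[-1]
    with open(filename, 'wb') as fh:
        fh.write(requests.get(bed_url).content)
    print(f"saved {filename}")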