import hashlib
import json
import os

import requests
from bs4 import BeautifulSoup
from requests import HTTPError

from utils import Download


def parse_md5(path, chunk_size=1 << 20):
    """Return the hexadecimal MD5 digest of the file at *path*.

    The file is read in fixed-size chunks (default 1 MiB) so that
    arbitrarily large files — the genome archives are multi-gigabyte —
    never have to fit in memory at once. The original implementation
    read the whole file with a single ``f.read()``.

    :param path: filesystem path of the file to hash
    :param chunk_size: bytes read per iteration (must be > 0)
    :return: hex digest string, e.g. ``'d41d8cd98f00b204e9800998ecf8427e'``
    """
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        # iter(callable, sentinel) yields chunks until read() returns b''.
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()


def download(url, filename, dirname):
    """Start a :class:`utils.Download` task fetching *url* into *dirname*/*filename*."""
    # Use a distinct local name instead of shadowing this function's own name.
    task = Download(url, fileName=filename, dirName=dirname)
    task.start()


if __name__ == '__main__':
    # Load persisted state from the previous run: 'baseurl', the last-seen
    # release 'dirname' and folder 'update_time', plus per-file records
    # in 'subfiles'.
    with open('./1000genomes.json', 'r') as f:
        data_source = json.load(f)

    baseurl = data_source['baseurl']

    response = requests.get(url=baseurl)
    if response.status_code != 200:
        raise HTTPError("爬取失败，检查url在国内是否可以访问")
    soup = BeautifulSoup(response.text, 'html.parser')

    # Second-to-last <tr> of the Apache listing is the most recent release
    # directory; column 1 holds the link (dir name), column 2 the timestamp.
    tr = soup.find_all('tr')[-2].find_all('td')
    dirname = tr[1].find("a").text
    update_time = tr[2].text

    # BUG FIX: the original tested `dirname == data_source['dirname']`,
    # which entered the update path precisely when nothing had changed
    # (re-downloading everything on every run). An update means a NEW
    # directory name OR a NEW folder timestamp.
    if dirname != data_source['dirname'] or update_time != data_source['update_time']:
        sub_url = baseurl + dirname
        # Fetch the listing of the release directory itself.
        response = requests.get(url=sub_url)
        if response.status_code != 200:
            raise HTTPError("子文件夹遍历失败，检查代码是否正确")

        soup = BeautifulSoup(response.text, 'html.parser')
        tag_tr = soup.find_all('tr')
        file_list = []
        # Skip the header / parent-directory rows at the top and the
        # footer rows at the bottom of the listing.
        for row in tag_tr[3:-2]:
            cells = row.find_all('td')
            filename = cells[1].text.strip()
            # BUG FIX: a distinct name so the folder-level `update_time`
            # read above is not clobbered by the last file's timestamp
            # (the original reused `update_time` here, corrupting both the
            # `elif` below and the state saved at the end).
            file_update_time = cells[2].text

            # Keep only the compressed data files and their index files.
            filetype = os.path.splitext(filename)[1]
            if filetype == '.gz' or filetype == '.tbi':
                # BUG FIX: build the URL from the NEWLY discovered
                # directory (sub_url), not the previously stored
                # data_source['dirname'] — otherwise a renamed release
                # folder produces dead links.
                file_list.append({
                    "filename": filename,
                    "update_time": file_update_time,
                    "url": sub_url + filename,
                })

        # A brand-new release directory: rebuild the file records and
        # download every .gz / .tbi file in it.
        if dirname != data_source['dirname']:
            data_source['subfiles'] = [
                {
                    'filename': file['filename'],
                    'update_time': file['update_time'],
                    'url': file['url'],
                    'path': os.path.join('download', '1000genomes', file['filename']),
                }
                for file in file_list
            ]
            # BUG FIX: the original read file['fileanme'] (typo) here and
            # raised KeyError before downloading anything. Also dropped
            # the misleading for/else — with no `break`, the `else` clause
            # always ran, so this was plain sequential code all along.
            for entry in data_source['subfiles']:
                download(entry['url'], filename=entry['filename'], dirname='1000genomes')
                # Record the checksum only if the download actually
                # produced a file on disk.
                if os.path.isfile(entry['path']):
                    entry['md5'] = parse_md5(entry['path'])

        # Same directory but a newer folder timestamp: download only the
        # files whose individual timestamps changed.
        elif update_time != data_source['update_time']:
            for current_file in data_source['subfiles']:
                for new_file in file_list:
                    if new_file['filename'] == current_file['filename'] and new_file['update_time'] != current_file['update_time']:
                        # Refresh the stored timestamp, re-download, rehash.
                        current_file['update_time'] = new_file['update_time']
                        filename = new_file['filename']
                        path = os.path.join('download', '1000genomes', filename)
                        download(new_file['url'], filename=filename, dirname='1000genomes')
                        if os.path.isfile(path):
                            current_file['md5'] = parse_md5(path)

        # Persist the new folder-level state (file-level records were
        # updated in place above).
        data_source['dirname'] = dirname
        data_source['update_time'] = update_time

        with open('./1000genomes.json', 'w') as f:
            json.dump(data_source, f)
    else:
        print('没有任何更新')
