#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import ssl
from urllib import request,error,parse
from bs4 import BeautifulSoup
from io import BytesIO
import gzip
import re
import time
import os

# Disable TLS certificate verification for all HTTPS requests in this process.
# NOTE(review): presumably a workaround for missing local CA certificates —
# confirm this is still needed; it weakens transport security globally.
ssl._create_default_https_context = ssl._create_unverified_context


# Characters whose images were already downloaded (loaded from the 'history' file).
history = []
# URLs already fetched during this run, to avoid duplicate requests.
history_url = []
# Total number of characters to process (set in __main__).
length = 0
# Running count of processed characters.
count = 0
cPath = os.getcwd()

# Build the output directory '<parent-of-cwd>/public/img/' by replacing the
# last path component of the cwd with 'public' and appending 'img'.
# (The original also had `cPath.split('/')[-1] = 'public'`, which assigned
# into a throwaway temporary list — a dead no-op, removed here.)
targetPath = cPath.split('/')
targetPath[-1] = 'public'
targetPath.append('img')
targetPath = '/'.join(targetPath) + '/'

# Request headers copied from a real Chrome 111 (macOS) session so that
# hanyu.baidu.com serves the normal page instead of blocking the script.
# NOTE(review): the Cookie value is a captured session — it may expire and
# need refreshing if requests start failing.
header = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Cookie': 'BAIDUID=6CBC63B104656434EA6B2CBC34214601:FG=1; BAIDUID_BFESS=6CBC63B104656434EA6B2CBC34214601:FG=1; Hm_lvt_010e9ef9290225e88b64ebf20166c8c4=1681355017; Hm_lpvt_010e9ef9290225e88b64ebf20166c8c4=1681461241',
    'Host': 'hanyu.baidu.com',
    'Pragma': 'no-cache',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"'
}

def download_img(img_url, name):
    """Download the image at img_url and save it as '<name>.<ext>' in targetPath.

    On success, increments the global count and records name in history.
    Returns False on a non-200 response; returns None on success
    (interface kept from the original).
    """
    global count
    # Take the extension from the last path segment of the URL, e.g. '.gif'.
    ext = re.search(r'([^/]+)\.([a-z]+)$', img_url).group(2)
    img_name = name + '.' + ext
    print(img_name)
    filename = targetPath + img_name
    # Close the connection deterministically (the original leaked it).
    with request.urlopen(img_url) as response:
        if response.getcode() == 200:
            with open(filename, 'wb') as f:
                f.write(response.read())  # write the image bytes to disk
            count += 1
            history.append(name)
            print(f'完成第{count}个,总计{length}个')
        else:
            # BUG fix: the original concatenated str + int, raising TypeError
            # exactly on the failure path it was trying to report.
            print('下载失败：' + str(response.getcode()))
            return False


def requesthtml(t):
    """Fetch the Baidu Hanyu page for character t and download its stroke-order GIF.

    Skips work when the URL or the character was already processed. On an
    HTTP error, logs and returns without raising.
    """
    global count
    url = f'https://hanyu.baidu.com/s?wd={parse.quote(t)}&ptype=zici'
    if url in history_url:
        print('跳过' + url)
        count += 1
        return
    if t in history:
        print('跳过' + t)
        count += 1
        return

    req = request.Request(url, headers=header)
    print('\n')
    print(f'开始抓取{url}')
    history_url.append(url)
    try:
        # Close the connection deterministically (the original leaked it).
        with request.urlopen(req) as res:
            body = res.read()
            content_encoding = res.headers.get('Content-Encoding', '')
        # Robustness fix: only gunzip when the server actually compressed the
        # response; the original unconditionally gunzipped and crashed on
        # plain responses.
        if 'gzip' in content_encoding:
            body = gzip.GzipFile(fileobj=BytesIO(body)).read()
        html_str = body.decode('utf-8')
    except error.HTTPError as e:
        # BUG fix: print()'s keyword is `sep`, not `seq` — the original raised
        # TypeError whenever this error path was reached.
        print(f'{url}页面打开失败', e.reason, e.code, sep='\n')
        return

    soup = BeautifulSoup(html_str, "html.parser")
    imgs = soup.find_all(id='word_bishun')

    # The stroke-order animation lives in the 'data-gif' attribute of the
    # element with id 'word_bishun'.
    if imgs:
        img = imgs[0]
        if 'data-gif' in img.attrs:
            imgSrc = img['data-gif']
            print(imgSrc)
            download_img(imgSrc, t)

# Characters whose stroke-order images should be fetched.
shengzi = ['十', '好', '变', '天']

if __name__ == '__main__':
    length = len(shengzi)
    # Load the list of previously-downloaded characters. Robustness fix: on
    # the very first run there is no 'history' file yet — start empty instead
    # of crashing with FileNotFoundError. utf-8 is specified explicitly so the
    # Chinese characters round-trip regardless of the platform's locale.
    try:
        with open('history', 'r', encoding='utf-8') as historyFile:
            history = historyFile.read().split(' ')
    except FileNotFoundError:
        history = []

    for zi in shengzi:
        requesthtml(zi)

    # Persist the updated history as a space-separated list.
    with open('history', 'w', encoding='utf-8') as historyFile:
        historyFile.write(' '.join(history))