import os
import requests
import time
from selenium import webdriver
from bs4 import BeautifulSoup
import urllib.request


def openUrl(url, floderName, fileName):
    """Load *url* in Selenium-driven Chrome, scroll to trigger lazy image
    loading, and save the rendered page source to <floderName>/<fileName>.html.

    Args:
        url: Baidu image-search results URL to fetch.
        floderName: Output directory for the saved HTML (created if missing).
        fileName: Base name (without extension) for the saved HTML file.
    """
    driver_path = r'E:\Application\chromedriver.exe'
    driver = webdriver.Chrome(executable_path=driver_path)
    driver.implicitly_wait(20)  # implicit wait: up to 20s for elements to appear
    try:
        driver.get(url)
        time.sleep(4)
        # Scroll to the bottom 5 times so lazily-loaded images are rendered.
        for _ in range(5):
            driver.execute_script('window.scrollTo(0,document.body.scrollHeight);')
            time.sleep(1)
        # Persist the fully rendered page for offline parsing.
        # fix: create the output directory first (original crashed if absent).
        os.makedirs(floderName, exist_ok=True)
        with open(os.path.join(floderName, f"{fileName}.html"), 'w+', encoding='utf-8') as f:
            f.write(driver.page_source)
    finally:
        # fix: the browser was never closed (both close()/quit() were
        # commented out), leaking a Chrome process per call.
        driver.quit()


def parsingHTML(floderName, fileName):
    """Parse the saved search-results HTML, collect image URLs and titles,
    then hand the URLs to saveImgUrl() and saveImage().

    Side effects: appends [urls, titles] to the module-level ``data`` list
    and appends the raw <li class="imgitem"> snippets to
    <floderName>/analyzeHtml.html for debugging.
    """
    with open(f"{floderName}\\{fileName}.html", 'rb') as f:
        htmlText = f.read()
    # Parse with the lxml HTML parser.
    soup = BeautifulSoup(htmlText, 'lxml')
    print(soup.title.string)

    data1 = []  # image URLs (src attributes)
    data2 = []  # image titles
    # fix: the original re-ran these whole-document find_all() scans once per
    # <li class="imgitem"> (identical result every iteration) and left
    # data1/data2 unbound when no items matched, raising NameError below.
    # Scan the document exactly once instead.
    for img in soup.find_all(name='img'):
        data1.append(img.get('src'))
    for anchor in soup.find_all('a', {'class': {"imgitem-title"}}):
        data2.append({'imgTitle': anchor.get('title')})

    # Keep the raw <li> snippets on disk for debugging, as before.
    with open(f"{floderName}\\analyzeHtml.html", 'a+', encoding='utf-8') as f:
        for item in soup.select('li[class="imgitem"]'):
            f.write(str(item))

    data.append(data1)
    data.append(data2)
    print(len(data1))
    print('save to list')
    saveImgUrl(data1)
    saveImage(data1)


def saveImgUrl(data1):
    """Write every http(s) URL from *data1* to <floderName>/imgUrl.txt,
    one per line; non-URL entries are skipped.

    NOTE(review): relies on the module-level ``floderName`` global set in
    the __main__ block.
    """
    with open(f"{floderName}\\imgUrl.txt", 'w+', encoding='utf-8') as f:
        for item in data1:
            # fix: item can be None (an <img> without a src attribute yields
            # .get('src') == None), which crashed .startswith() before.
            # idiom: startswith() accepts a tuple of prefixes.
            if item and item.startswith(('https://', 'http://')):
                f.write(item + '\n')


def saveImage(data1):
    """Download each http(s) URL in *data1* to <floderName>/<index>.jpg.

    Skips entries that are None or not absolute http(s) URLs.

    NOTE(review): relies on the module-level ``floderName`` global set in
    the __main__ block.
    """
    for index, url in enumerate(data1):
        # fix: url can be None (missing src attribute), which crashed
        # .startswith() before; guard-clause + continue flattens the loop.
        if not url or not url.startswith(('https://', 'http://')):
            continue
        print(f'正在下载第{index}')
        # fix: without a timeout one dead server hangs the crawler forever.
        response = requests.get(url, timeout=30)
        with open(os.path.join(floderName, f'{index}.jpg'), mode='wb') as f:
            f.write(response.content)
        time.sleep(0.5)  # throttle: be polite to the image host


def input_str():
    """Read one line from stdin and return it (trailing newline stripped)."""
    return input()


def main():
    """Prompt for a search keyword, then crawl Baidu image search for it.

    An empty keyword falls back to a default, pre-encoded query
    (word=%E7%BE%8E%E5%A5%B3). Uses the module-level ``floderName`` and
    ``fileName`` globals set in the __main__ block.
    """
    print("Enter a keyword：", end='')
    keyWord = input_str()
    if keyWord == '':
        # Default query with a pre-encoded keyword.
        url = 'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&fm=index&pos=history&word=%E7%BE%8E%E5%A5%B3'
    else:
        # Percent-encode the keyword for the query string.
        # fix: dropped the dead unquote() round-trip whose result was unused.
        kw = urllib.request.quote(keyWord)
        url = f'https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&sf=1&fmq=&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&fm=index&pos=history&word={kw}'
    openUrl(url, floderName, fileName)
    parsingHTML(floderName, fileName)


if __name__ == '__main__':
    # Module-level globals consumed by the worker functions above;
    # renaming them would break saveImgUrl/saveImage/parsingHTML.
    floderName = 'resource'  # output directory ("floder" sic — kept for compatibility)
    fileName = 'source'  # base name of the saved HTML snapshot
    data = []  # accumulates [urls, titles] lists filled by parsingHTML
    main()
    print('done')
