#!/usr/bin/env python
#-*- coding:utf-8 -*-
# Author: LiuHuan
# Datetime: 2020/3/17 17:15

from bs4 import BeautifulSoup
from selenium import webdriver
import time
import random
import requests
from ValidateCode import recognize
from PIL import Image

def get_driver(chromedriver=r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'):
    """Launch and return a Chrome WebDriver instance.

    Parameters
    ----------
    chromedriver : str, optional
        Path to the chromedriver executable. Defaults to the local
        Windows Chrome install location, so existing no-argument
        callers keep working.
    """
    return webdriver.Chrome(chromedriver)

def get_soup_from_url(url):
    """Fetch *url* in a browser, solving CNKI captchas as needed.

    Opens the CNKI home page first (to establish session cookies),
    then navigates to *url*. While the page keeps asking for a
    verification code, the captcha image is screenshotted, OCR'd via
    ``recognize`` and submitted. Returns the final page parsed as a
    BeautifulSoup tree.

    The driver is always quit, even if a selenium call raises —
    otherwise every failed fetch would leak a Chrome process.
    """
    driver = get_driver()
    try:
        driver.set_window_size(1200, 800)
        # Visit the home page first so session cookies are in place.
        driver.get('https://www.cnki.net/')
        time.sleep(3)
        driver.get(url)
        time.sleep(3)
        # '请输入验证码' == "please enter the verification code":
        # loop until the page stops asking for a captcha.
        while '请输入验证码' in driver.page_source:
            # Screenshot just the captcha <img> element.
            element = driver.find_element_by_id('vImg')
            element.screenshot('temp.png')

            # Normalize to RGB JPEG for the recognizer.
            im = Image.open('temp.png')
            im = im.convert('RGB')
            im.save('code.jpg')
            code = recognize('code.jpg')
            print(code)
            driver.find_element_by_id('vcode').send_keys(code)
            driver.find_element_by_class_name('c_btn').click()
            # Random delay so the traffic looks less like a bot.
            time.sleep(random.randint(5, 15))
            # The server sometimes answers with an intermediate
            # redirect page; follow its single link manually.
            if 'Object moved to' in driver.page_source:
                driver.find_element_by_tag_name('a').click()
                time.sleep(8)

        source = driver.page_source
    finally:
        driver.quit()  # never leak the browser process

    soup = BeautifulSoup(source, "html.parser")
    return soup


def get_soup_from_file(path):
    """Parse a locally saved HTML file into a BeautifulSoup tree."""
    with open(path, 'r', encoding='utf-8') as handle:
        markup = handle.read()
    return BeautifulSoup(markup, "html.parser")

def get_content(soup):
    """Extract the text of every ``<div class="p1">`` paragraph.

    Inline ``<citation>`` tags are removed before the text is read.
    Each paragraph's text is echoed to stdout and collected into the
    returned list.
    """
    content = []
    for pa in soup.find_all('div', class_="p1"):
        # Strip citation markers in place; a plain loop makes the
        # side effect explicit (was a throwaway list comprehension).
        for citation in pa('citation'):
            citation.extract()
        # Read the text once instead of twice (print + append).
        text = pa.get_text()
        print(text)
        content.append(text)
    return content

if __name__ == '__main__':
    # get_content(get_soup_from_file('cnki_full.html'))

    # Crawl URL-list lines 502-601 (slice [501:601]); output files are
    # numbered 501..600 to stay consistent with earlier runs.
    with open('urls_1_60.txt', 'r', encoding='utf-8') as f:
        for i, line in enumerate(f.readlines()[501:601], start=501):
            url = line.strip()
            soup = get_soup_from_url(url)
            # Save the raw prettified HTML for later reprocessing.
            with open(str(i) + '.html', 'w', encoding='utf-8') as fw:
                fw.write(soup.prettify())
            content = get_content(soup)
            # Save the extracted paragraph texts.
            with open(str(i) + '.txt', 'w', encoding='utf-8') as fw:
                fw.writelines(content)
            print(i, 'Done')
            # Long random pause between pages to avoid being blocked.
            time.sleep(random.randint(100, 200))