# -*- coding: utf-8 -*-
import time
import re
import random
import requests
from bs4 import BeautifulSoup
import pymysql
import csv
import os

# connection = pymysql.connect(host='',
#                              user='',
#                              password='',
#                              db='',
#                              port=3306,
#                              charset='utf8')  # notice:utf8 not utf-8

# # Get cursor
# cursor = connection.cursor()

#url = 'http://epub.cnki.net/grid2008/brief/detailj.aspx?filename=RLGY201806014&dbname=CJFDLAST2018'

# Request headers for article detail pages. Without these (notably Referer
# and Host) the site redirects the request to another page.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Connection': 'keep-alive',
    'Host': 'www.cnki.net',
    'Referer': 'http://search.cnki.net/search.aspx?q=%E4%BD%9C%E8%80%85%E5%8D%95%E4%BD%8D%3a%E6%AD%A6%E6%B1%89%E5%A4%A7%E5%AD%A6&rank=relevant&cluster=zyk&val=CDFDTOTAL',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
}

# Lighter header set used for the search-result listing pages.
headers1 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
}

# Destination CSV for the scraped document records (hard-coded local path).
document_info_save_file = 'C:\\Users\\31435\\Desktop\\document_dataset\\document_info.csv'

def get_url_list(start_url):
    """Collect article detail-page URLs from paginated search results.

    Walks up to ``depth`` result pages by appending a ``&p=<offset>``
    paging parameter (15 results per page) to *start_url*, and gathers
    the first link of every ``div.wz_tab`` result entry.

    Args:
        start_url: Base search URL (query already encoded).

    Returns:
        list[str]: Unique article URLs in discovery order.
    """
    depth = 20  # maximum number of result pages to crawl
    url_list = []
    for i in range(depth):
        try:
            url = start_url + "&p=" + str(i * 15)
            search = requests.get(url.replace('\n', ''), headers=headers1)
            soup = BeautifulSoup(search.text, 'html.parser')
            for art in soup.find_all('div', class_='wz_tab'):
                # Hoist the repeated attribute lookup.
                href = art.find('a')['href']
                print(href)
                if href not in url_list:
                    url_list.append(href)
            print("Crawl page" + str(i) + "succeed")
            # Random pause to avoid hammering the server.
            time.sleep(random.randint(1, 3))
        except Exception:
            # BUG FIX: the original printed "succeed" on failure as well.
            print("Crawl page" + str(i) + "failed")
    return url_list

def get_data(url_list, wordType):
    """Fetch each article page, scrape its metadata, and append to CSV.

    For every URL the function extracts title, authors, abstract and
    keywords (keywords are optional on some pages) and appends one row
    ``[wordType, title, author, abstract, key]`` to
    ``document_info_save_file``.

    Args:
        url_list: Iterable of article detail-page URLs.
        wordType: Search keyword this batch belongs to; stored with each row.
    """
    try:
        record_no = 0  # BUG FIX: the original reset i=1 inside the loop and never incremented it
        for url in url_list:
            # pymysql.NULL is the string 'NULL'; skip blank/placeholder entries.
            if not url or url == 'NULL':
                continue
            record_no += 1
            soup = None
            try:
                html = requests.get(url.replace('\n', ''), headers=headers)
                soup = BeautifulSoup(html.text, 'html.parser')
            except Exception:
                print("connection failed")
            if soup is None:
                # BUG FIX: previously a failed fetch fell through and reused
                # the soup/title/... values from the prior iteration.
                continue
            # Initialise fields so a partial scrape still produces a row.
            title = ''
            author = ''
            abstract = ''
            key = ''
            try:
                print(url)
                # Page <title> is "<article title> - <site name>".
                title = soup.find('title').get_text().split('-')[0]
                for a in soup.find('div', class_='summary pad10').find('p').find_all('a', class_='KnowledgeNetLink'):
                    author += (a.get_text() + ' ')
                abstract = soup.find('span', id='ChDivSummary').get_text()
            except Exception:
                print("get data invalid partly")
            try:
                # Keywords are missing on some pages; best-effort only.
                for k in soup.find('span', id='ChDivKeyWord').find_all('a', class_='KnowledgeNetLink'):
                    key += (k.get_text() + ' ')
            except Exception:
                pass
            print("number" + str(record_no) + "url")
            print("Title:" + title)
            print("author:" + author)
            print("abstract:" + abstract)
            print("key:" + key)
            # BUG FIX: mode 'w+' truncated the file on every record, so only
            # the last row survived; append instead. newline='' is required
            # by the csv module to avoid blank lines on Windows.
            # BUG FIX: writerows('(%s,...)', tuple) is an invalid call
            # (TypeError); write one row as a list of fields.
            with open(document_info_save_file, 'a', newline='', encoding='utf-8') as data_save_file:
                writer = csv.writer(data_save_file)
                writer.writerow([wordType, title, author, abstract, key])
            print()
        print("Crawl completed")
    finally:
        print()

if __name__ == '__main__':
    try:
        # Crawl one batch per keyword; each keyword is prefixed with
        # "土+" ("soil+") to narrow the search.
        for wordType in {"本构", "耦合"}:
            wordType = "土+" + wordType
            start_url = "http://search.cnki.net/search.aspx?q=%s&rank=relevant&cluster=zyk&val=" % wordType
            url_list = get_url_list(start_url)
            # BUG FIX: message typo "begining" -> "beginning".
            print("Crawl beginning")
            get_data(url_list, wordType)
            print("One type of crawling is complete")
        print("All Crawl completed")
    finally:
        # BUG FIX: message typo "C+rawling" -> "Crawling".
        print("Crawling completed")
        # connection.close()

# Note: storing results in a MySQL database did not meet the needs here,
# so records are written to a CSV file instead.

# Output path for the birth-weight dataset; only referenced by the
# commented-out download/convert code below.
birth_weight_file = 'birth_weight.csv'

# # download data and create data file if file does not exist in current directory
# # 如果当前文件夹下没有birth_weight.csv数据集则下载dat文件并生成csv文件
# if not os.path.exists(birth_weight_file):
#     birthdata_url = 'https://github.com/nfmcclure/tensorflow_cookbook/raw/master/01_Introduction/07_Working_with_Data_Sources/birthweight_data/birthweight.dat'
#     birth_file = requests.get(birthdata_url)
#     birth_data = birth_file.text.split('\r\n')
#     # split分割函数,以一行作为分割函数，windows中换行符号为'\r\n',每一行后面都有一个'\r\n'符号。
#     birth_header = birth_data[0].split('\t')
#     # 每一列的标题，标在第一行，即是birth_data的第一个数据。并使用制表符作为划分。
#     birth_data = [[float(x) for x in y.split('\t') if len(x) >= 1] for y in birth_data[1:] if len(y) >= 1]
#     print(np.array(birth_data).shape)
#     # (189, 9)
#     # 此为list数据形式不是numpy数组不能使用np,shape函数,但是我们可以使用np.array函数将list对象转化为numpy数组后使用shape属性进行查看。
#     with open(birth_weight_file, "w", newline='') as f:
#     # with open(birth_weight_file, "w") as f:
#         writer = csv.writer(f)
#         writer.writerows([birth_header])
#         writer.writerows(birth_data)
#         f.close()
