'''
@Description: getting links information and storing them in database
@Author: Chen Chenxi
@Date: 2019-12-14 14:19:44
@LastEditTime: 2019-12-14 20:04:07
'''
import re
import requests
import sys
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup as bs
import os
import pymysql
import time
import random


# HTTP request headers sent with every crawl request; the desktop-Firefox
# User-Agent makes the scraper look like a normal browser to Baidu.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, compress',
    'Accept-Language': 'en-us;q=0.5,en;q=0.3',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
}



# proxy = {'http': '125.127.24.239:808'}
# proxy_support = urllib.request.ProxyHandler(proxy)
# opener=urllib.request.build_opener(proxy_support)
# opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')]
# urllib.request.install_opener(opener)

# NOTE(security): DB credentials are hard-coded in source — move them to
# environment variables or a config file before sharing/deploying.
conn = pymysql.connect(host='39.107.112.113', user='root', password='zhangtianze1997', port=3306, db='recogact', charset='utf8')
PATH1 = './classInd2.txt'           # Chinese class names, one per line
PATH2 = './UCF_list/classInd.txt'   # English class names (UCF101 list)
CNLIST = []  # Chinese label names, trailing newline dropped
ENLIST = []  # English label names
cursor = conn.cursor()

# Load the Chinese label list. `with` closes the file (the original rebound
# `f` without closing the first handle). `[:-1]` drops the trailing newline
# — assumes every line, including the last, ends with '\n'.
with open(PATH1, 'r', encoding='utf-8') as fh:
    for line in fh:
        CNLIST.append(line[:-1])

# Load the English label list. rstrip('\n') fixes the original bug of
# keeping the trailing newline, which was then written into the database
# as part of label_name_en.
with open(PATH2, 'r', encoding='utf-8') as fh:
    for line in fh:
        ENLIST.append(line.rstrip('\n'))

def searh_baidu(keywords, pagenum):
    """Fetch one Baidu search-results page for *keywords*.

    Args:
        keywords: query string (not yet URL-encoded).
        pagenum: result offset as a string (Baidu's ``pn`` parameter).

    Returns:
        Raw response body (bytes) of the results page.
    """
    keywords = urllib.parse.quote(keywords)
    # BUG FIX: the original did `pagenum = urllib.parse.quote(keywords)`,
    # quoting the wrong variable — every call therefore fetched the same
    # (first) results page regardless of the requested page number.
    pagenum = urllib.parse.quote(pagenum)
    url = 'http://www.baidu.com/s?wd=%s&pn=%s' % (keywords, pagenum)
    r = requests.get(url=url, headers=headers)
    return r.content

def crawl(keywords, COUNT):
    """Scrape the first two Baidu result pages for *keywords* and insert
    each result into the ``recact_contentlist`` table.

    Args:
        keywords: Chinese label name; must be present in module-level CNLIST.
        COUNT: next primary-key id to use for inserted rows.

    Returns:
        Updated COUNT after all successful inserts.
    """
    # Parameterized statement — the original interpolated scraped text
    # straight into the SQL string, which broke on quotes in titles/abstracts
    # and was SQL-injectable. pymysql escapes %s placeholders safely.
    insert_sql = (
        "insert into recact_contentlist "
        "(id, c_id, label_name_en, label_name_cn, is_baike, link, link_abstract, link_title) "
        "values (%s, %s, %s, %s, 0, %s, %s, %s)"
    )
    # Loop-invariant lookups hoisted out of the result loop; class ids are 1-based.
    c_id = CNLIST.index(keywords) + 1
    label_name_en = ENLIST[c_id - 1]
    page = 0
    while page <= 1:  # first two result pages only
        html = searh_baidu(keywords, str(page))
        soup = bs(html, "html.parser")
        for item in soup.find_all("div", {"class": "result"}):
            result_list = [0] * 3  # [title, resolved link, abstract]
            a_click = item.find('a')
            if a_click:
                result_list[0] = a_click.get_text()
                # Baidu links are redirect URLs; follow once to record the real target.
                a = requests.get(url=a_click.get('href'), headers=headers)
                result_list[1] = a.url
            c_abstract = item.find("div", {"class": "c-abstract"})
            if c_abstract:
                result_list[2] = c_abstract.get_text()
            try:
                cursor.execute(insert_sql, (COUNT, c_id, label_name_en, keywords,
                                            result_list[1], result_list[2], result_list[0]))
                print("submit is good")
                conn.commit()
                COUNT += 1
            except Exception as e:
                # Best-effort: log and keep crawling the remaining results.
                print("error is " + str(e))
        time.sleep(1)  # be polite between page fetches
        page += 1
    return COUNT

if __name__ == "__main__":
    path = './classInd2.txt'
    cn_list = []
    f = open(path,'r',encoding='utf-8')
    line = f.readline()
    while line:
        cn_list.append(line[:-1])
        line = f.readline()

    count = 1
    C = 1166
    index_l = [29,60,97,99]
    new_cn_list = []
    for i in index_l:
        new_cn_list.append(cn_list[i-1])
    print(new_cn_list)
    for kw in new_cn_list:
        C = crawl(kw,C)
        print('%d/%d processed'%(count,len(new_cn_list)))
        count+= 1
        time.sleep(10)