import requests
import pymysql
import datetime
import re

# Pool of request-header dicts (cookie + user-agent pairs) used to rotate
# identities when Toutiao starts blocking a client fingerprint.
headers1 = {
    'cookie': 'tt_webid=6750805972391904782',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3730.400 QQBrowser/10.5.3805.400'
}
headers2 = {
 'cookie': 'tt_webid=6749789110511093262',
 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}
headers3 = {
 'cookie': 'tt_webid=6744890312793818627',
 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36'
}
# Rotation pool consumed by change_headers().
headers_list = [headers1, headers2,headers3]
# Currently-active headers; same identity as headers2 so that
# headers_list.index(headers) finds it in the pool.
headers = {
    'cookie': 'tt_webid=6749789110511093262',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'
}

# Rotate the request headers (change the apparent client identity).
def change_headers(headers_list, num, headers):
    """Return the next headers dict from the pool every 13th request.

    Args:
        headers_list: pool of candidate header dicts.
        num: request counter; rotation happens only when num % 13 == 0.
        headers: the currently-active headers (must be an element of
            headers_list, or ValueError is raised by index()).

    Returns:
        The next headers dict in the pool (wrapping around), or the
        unchanged `headers` when it is not yet time to rotate.
    """
    if num % 13 == 0:
        k = headers_list.index(headers)
        # Bug fix: the original used `% 2`, which skipped headers3 entirely
        # and mis-wrapped when starting from the third entry. Wrap over the
        # whole pool instead.
        return headers_list[(k + 1) % len(headers_list)]
    return headers
# Parse-data method (first page).
def pares_first(url, headers, num):
    """Fetch `url` with the given request headers and return the response.

    NOTE(review): this looks like an unfinished stub — nothing in the file
    calls it and the original discarded the response. The dead locals
    (`item`, `count`) and the pointless `global headers1/headers2`
    declarations were removed; `num` is kept for interface compatibility
    but is unused.

    Args:
        url: page URL to fetch.
        headers: request headers dict to send.
        num: request counter (unused; presumably meant for header rotation).

    Returns:
        The `requests.Response` object (previously the response was
        fetched and silently dropped).
    """
    response = requests.get(url=url, headers=headers)
    return response

def down_image(url,path):
    # Stub: intended to download the image at `url` to `path`, but
    # currently only logs "下载图片" ("downloading image"). TODO: implement.
    print('下载图片')

def get_url():
    """Load up to 200 (id, title, link) rows from cmf_jrtt_list.

    Returns:
        A tuple of (id, title, link) row tuples.

    Fix: the original leaked the connection and cursor (never closed) and
    had a stray trailing semicolon.
    """
    db = pymysql.connect(host="localhost", user='root', password='root', db='spiders')
    try:
        with db.cursor() as cursor:
            cursor.execute('select id,title,link from cmf_jrtt_list limit 200')
            return cursor.fetchall()
    finally:
        db.close()

# Request each article link and extract/save its fields.
def parse_data(results):
    """Fetch every article link in `results`, extract its fields, save them.

    Args:
        results: iterable of (id, title, link) rows from get_url().

    Fixes over the original:
    - The original contained Chinese pseudo-code (`if 匹配文章:` /
      `else 匹配问答:`) that was a SyntaxError; the "is this an article"
      test is now "did every field regex match".
    - `headers` was assigned inside the function without `global`, which
      would have made it local and raised UnboundLocalError on first use.
    - `re.search(...).group(0)` crashed with AttributeError on any
      non-matching page; non-matches now trigger the header-rotation retry.
    - The retry path re-fetched but never re-parsed; it now re-parses.
    - `item` was one dict reused across rows, so a failed row could save
      stale fields from the previous row; a fresh dict is built per page.
    """
    global headers
    for row in results:
        url = row[2]
        print(url)
        response = requests.get(url=url, headers=headers)
        result = response.content.decode('utf8')
        item = _extract_article(result, url)
        if item is None:
            # 换请求头,重新发起请求 — rotate headers and retry once
            # (pass a multiple of 13 so change_headers actually rotates;
            # the original passed 10, which never triggered a rotation).
            headers = change_headers(headers_list, 13, headers)
            response = requests.get(url=url, headers=headers)
            result = response.content.decode('utf8')
            item = _extract_article(result, url)
        if item is not None:
            save_data(item)

def _extract_article(result, sourceurl):
    """Extract article fields from the page source.

    Args:
        result: decoded HTML/JS source of the page.
        sourceurl: original link, stored alongside the extracted fields.

    Returns:
        A dict with keys title/content/writer/publishtime/sourceurl, or
        None when any field is missing (e.g. the link is a Q&A page, or
        the request was blocked).
    """
    # (item key, regex over the inlined JS, prefix stripped from the match)
    specs = (
        ('title', "title: '.*?'", "title: "),
        ('content', "content: '.*?'", "content: "),
        ('writer', "source: '.*?'", "source: "),
        ('publishtime', "time: '.*?'", "time: "),
    )
    item = {}
    for key, pattern, prefix in specs:
        match = re.search(pattern, result, re.DOTALL | re.MULTILINE)
        if match is None:
            return None
        item[key] = match.group(0).replace(prefix, "")
    item['sourceurl'] = sourceurl
    return item
def save_data(item):
    """Insert one scraped article into the `article` table.

    Args:
        item: dict with keys title/writer/publishtime/content/sourceurl.

    Commits on success and prints "插入成功"; on any failure rolls back and
    prints "插入失败". Fixes over the original: the connection is now always
    closed (it leaked), and the bare `except:` is narrowed to `Exception`
    (still broad because a missing item key must also roll back, but no
    longer swallows KeyboardInterrupt/SystemExit).
    """
    db = pymysql.connect(host="localhost", user='root', password='root', db='spiders')
    sql = 'insert into article(`title`,`writer`,`publish`,`content`,`sourceurl`,`createtime`) values (%s,%s,%s,%s,%s,%s)'
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, (
                item['title'], item['writer'], item['publishtime'], item['content'],
                item['sourceurl'], datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        db.commit()
        print('插入成功')
    except Exception:
        db.rollback()
        print('插入失败')
    finally:
        db.close()
def main():
    """Entry point: load article links from the database, then scrape
    and persist each one."""
    rows = get_url()
    parse_data(rows)


if __name__ == '__main__':
    main()