import threading   # 多线程
import lxml
import requests
from lxml import etree
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
import pymysql



class ConsumerThread(threading.Thread):  # worker thread for one range of pages
    """Worker thread that scrapes a contiguous range of news listing pages.

    Each thread walks its slice of the paginated archive, follows every
    article link found on a listing page, extracts title/date/body/reviewer
    and persists the record via save_file().

    Args:
        startUrl: base listing URL, e.g. 'https://www.qfnu.edu.cn/xxyw/';
            the archive page number and '.htm' are appended to it.
        headers: HTTP headers forwarded to request_page().
        startNum: first logical page (1-based) this thread handles.
        endNum: last logical page this thread handles.
        tname: human-readable thread label (informational only).
    """
    def __init__(self, startUrl, headers, startNum, endNum, tname):
        threading.Thread.__init__(self)
        self.startUrl = startUrl
        self.headers = headers
        self.startNum = startNum
        self.endNum = endNum
        self.tname = tname

    def run(self):
        """Fetch each listing page, visit every article on it, save results."""
        # The site numbers archive pages in reverse, e.g.:
        #   https://www.qfnu.edu.cn/xxyw.htm        (newest)
        #   https://www.qfnu.edu.cn/xxyw/312.htm    (older)
        # so logical page 1 maps to archive page 314 - 1 = 313.
        # (renamed from `sum`, which shadowed the builtin)
        page_offset = 314
        for page in range(page_offset - self.startNum, page_offset - self.endNum, -1):
            print("正在提取数据....")
            res = request_page(self.startUrl + str(page) + '.htm', self.headers)
            listing = lxml.etree.HTML(res)
            # NOTE(review): this XPath matches only the first list item
            # (id 'line_u11_0'); confirm whether the remaining items on each
            # listing page should be scraped too.
            for li in listing.xpath('//*[@id="line_u11_0"]/a/@href'):
                options = webdriver.ChromeOptions()
                options.add_argument('--headless')
                options.add_argument('--disable-gpu')
                driver = webdriver.Chrome(
                    executable_path=r'D:\BaiduNetdiskDownload\爬虫\代码\接单考核\chromedriver.exe',
                    options=options)
                try:
                    driver.get("https://www.qfnu.edu.cn/" + li)
                    driver.implicitly_wait(10)  # wait for JS-rendered content
                    data = driver.page_source
                finally:
                    # Fix: the original never quit the driver, leaking one
                    # headless Chrome process per article.
                    driver.quit()
                html = etree.HTML(data)
                title = html.xpath('//*[@id="main"]/div/div[2]/div/div/form/div[1]/h2/text()')[0]
                date = (html.xpath('//*[@id="main"]/div/div[2]/div/div/form/div[1]/p/text()')[0]
                        + html.xpath('//*[@id="main"]/div/div[2]/div/div/form/div[1]/p/span/text()')[0])
                # Body paragraphs come back as Python lists; stringify and
                # strip brackets/quotes/non-breaking spaces to get plain text.
                text = (str(html.xpath('//*[@id="vsb_content"]/div/p[1]/span/span/text()'))
                        .replace('[', '').replace(']', '')
                        + str(html.xpath('//*[@id="vsb_content"]/div/p[*]/span/text()'))
                        .replace('[', '').replace(']', '')
                        .replace(r"\xa0", '').replace("'", ''))
                # Reviewer name sits outside the content div; grab it via regex.
                check = re.findall(r'>审核：(.*?)</span>', data)
                save_file(title, date, text, check)
# Fetch one listing page's raw HTML
def request_page(startUrl, headers, timeout=30):
    """Download a listing page and return its decoded HTML text.

    Args:
        startUrl: full URL of the page to fetch.
        headers: HTTP headers to send with the request.
        timeout: seconds before the request is abandoned (new, defaulted —
            the original had none, so a stalled server hung the worker
            thread forever).

    Returns:
        The response body decoded as GBK.
    """
    # verify=False: the site's TLS chain fails validation; kept as-is.
    response = requests.get(startUrl, headers=headers, verify=False, timeout=timeout)
    response.encoding = "gbk"  # the site serves GBK; force it to avoid mojibake
    return response.text

# Build the request headers
def get_headers():
    """Return the static HTTP headers used for every listing-page request.

    Includes a browser User-Agent and a session cookie so the site serves
    the same markup it would to a real browser.
    """
    accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
    cookie = 'Hm_lvt_56826aaa17b4c3ce4280b3d20836936c=1666856114; JSESSIONID=54349406AEB833D449550E33C017EDC0; Hm_lpvt_56826aaa17b4c3ce4280b3d20836936c=1666856303'
    agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'
    return {
        'Accept': accept,
        'Host': 'www.qfnu.edu.cn',
        'Cookie': cookie,
        'User-Agent': agent,
    }

# Persist one article to MySQL
def save_file(title, date, text, check):
    """Insert one scraped article into the `news.newss` table.

    Args:
        title: article headline.
        date: publication date string (listing date + span suffix).
        text: cleaned article body.
        check: list of reviewer names extracted by regex (stored via str()).
    """
    db = pymysql.connect(host='localhost', user='root', password='123456',
                         port=3306, charset='utf8', db='news')
    # One-time schema setup, kept for reference:
    # cursor.execute("create database news;")
    # cursor.execute("use news;")
    # cursor.execute("create table newss(title TEXT(65535),dates TEXT(65535),text TEXT(65535),checks TEXT(65535));")
    try:
        cursor = db.cursor()
        # Fix: parameterized query instead of f-string interpolation — the
        # scraped fields routinely contain quotes, which both broke the
        # INSERT and made it SQL-injectable. str(check) matches the old
        # f-string rendering of the list.
        sql = "insert into newss(title,dates,text,checks) values (%s,%s,%s,%s)"
        cursor.execute(sql, (title, date, text, str(check)))
        db.commit()
    finally:
        # Fix: close the connection even if execute/commit raises.
        db.close()
# Script entry point
if __name__ == '__main__':
    base_url = 'https://www.qfnu.edu.cn/xxyw/'   # page number + '.htm' is appended
    request_headers = get_headers()
    # Adjust the page ranges here to control how much gets scraped.
    workers = [
        ConsumerThread(base_url, request_headers, 1, 140, 'A'),     # pages 1-140
        ConsumerThread(base_url, request_headers, 141, 280, 'B'),   # pages 141-280
    ]
    # Launch both scraper threads, then wait for them to finish.
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()




