import time
import sys
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import requests
from bs4 import BeautifulSoup
import re
from lxml import etree
import pymysql
import traceback


def get_conn():
    """Open a MySQL connection to the `hotsearch` database.

    Returns:
        (conn, cursor): an open pymysql connection and a cursor on it.
        Caller is responsible for closing both (see ``close_conn``).
    """
    # NOTE(review): host/user/password are "#" placeholders — fill in real
    # credentials (ideally from env/config, not source) before running.
    connection = pymysql.connect(
        host="#", user="#", password="#", db="hotsearch", charset="utf8"
    )
    return connection, connection.cursor()


def close_conn(conn, cursor):
    """Release the cursor first, then the connection; either may be None."""
    for resource in (cursor, conn):
        if resource:
            resource.close()


# 爬取b站弹幕
# Scrape the Bilibili popularity ranking
def get_bzhan_hot():
    """Scrape the Bilibili all-category popular ranking page.

    Returns:
        (context, connum): parallel lists — video titles reduced to
        CJK/ASCII-alphanumeric characters, and the first number found in
        the first stat span of each entry (presumably the play count —
        TODO confirm against the live page markup).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) '
                      'Version/9.0 Mobile/13B143 Safari/601.1 '
    }

    url = "https://www.bilibili.com/v/popular/rank/all"
    # timeout keeps a stalled server from hanging the whole updater run.
    get_data = requests.get(url, headers=headers, timeout=10).text
    a = etree.HTML(get_data)
    # Crude throttle between scrapes (anti-anti-crawler pause).
    time.sleep(1)
    # c: title anchor nodes
    c = a.xpath('//*[@id="app"]/div/div[2]/div[2]/ul/li/div/div[2]/a')
    # Keep only CJK ideographs, ASCII letters and digits from each title.
    context = [''.join(re.findall(r'[\u4e00-\u9fa5a-zA-Z0-9]', i.text)) for i in c]
    # d: heat/stat span texts; take the first integer or decimal in each.
    d = a.xpath('//*[@id="app"]/div/div[2]/div[2]/ul/li/div/div[2]/div/div/span[1]/text()')
    connum = [re.findall(r"[-+]?\d*\.\d+|\d+", j)[0] for j in d]

    return context, connum

# 爬取微博热搜
# Scrape the Weibo hot-search board
def get_weibo_hot():
    """Scrape the Weibo realtime hot-search summary page.

    Returns:
        (context, connum): parallel lists of hot-search titles and the
        first integer found in each heat span.

    NOTE(review): the session Cookie below is hard-coded and will expire;
    refresh it when the scrape starts returning empty lists.
    """
    wb_url = 'https://s.weibo.com/top/summary/'
    headers = {
        'Host': 's.weibo.com',
        "Cookie": "SINAGLOBAL=1693965547758.034.1651588697205; XSRF-TOKEN=_CYUR9D_h9MN2S5TDwwEIEmB; "
                  "_s_tentry=www.baidu.com; UOR=,,www.baidu.com; Apache=962238256358.7561.1651752863370; "
                  "ULV=1651752863383:2:2:2:962238256358.7561.1651752863370:1651588697266; "
                  "SUB=_2A25Pd6QfDeRhGeNH4lYW8S3JyD2IHXVsBJLXrDV8PUNbmtAfLUf7kW9NSnQMj2ggLnI0Vmufn5MHyVawEAGGvDv4; "
                  "SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWnKsk0QoI8cBWxJ4MKaCrz5JpX5KzhUgL.Fo"
                  "-41KBNeKefe022dJLoINRLxKBLBonL1h5LxK-L12qLB-2LxKML1hqL122LxKML12eLB-zLxKML1-2L1hBLxKnL1heL12eLxK"
                  "-LB.BLBo2LxKqL1-eL1h.LxKBLB.zL1h5LxKBLBo.L1hnLxK-L1-BL12qLxK-LB.qL1heLxKqL1h5L1-BLxK-L1K2LBKzt; "
                  "ALF=1683294158; SSOLoginState=1651758159; "
                  "WBPSESS=CiQETiCir9LgF9MfEU_Ia_zEX_s-qYhdsLtdjGPrb-2fCUlNtQLhiinsuxZ1MDCEzldKdN-BP30FcOgNdjY"
                  "-AgcY7gd-jekFkttCYZjyDzFTaLAK64CovYkb5iYy_JiUEEoOMpu7eIhF2U3P7zTr-w==",
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/92.0.4515.131 Safari/537.36 '
    }
    # timeout keeps a stalled server from hanging the whole updater run.
    get_response = requests.get(wb_url, headers=headers, timeout=10)
    get_data = get_response.text
    a = etree.HTML(get_data)
    # Crude throttle between scrapes (anti-anti-crawler pause).
    time.sleep(1)
    # c: title anchor nodes under each table row
    c = a.xpath('//*[@id="pl_top_realtimehot"]/table/tbody/tr/td[2]/a')
    # Drop non-ranking rows — presumably the board header plus two pinned/ad
    # entries at these positions; verify against the live page layout.
    c.pop(0)
    c.pop(3)
    c.pop(6)
    context = [i.text for i in c]
    # d: heat spans; the header row has no span, so only the two
    # pinned/ad positions are dropped here.
    d = a.xpath('//*[@id="pl_top_realtimehot"]/table/tbody/tr/td[2]/span')
    d.pop(3)
    d.pop(6)
    # Raw string: '\d' in a plain literal is an invalid escape on modern Python.
    connum = [re.findall(r'\d+', j.text)[0] for j in d]

    return context, connum


# 爬取知乎热搜
# Scrape the Zhihu hot list
def get_zhihu_hot():
    """Scrape the Zhihu hot list via its obfuscated CSS class names.

    Returns:
        (context, connum): parallel lists of question titles and the first
        integer in each heat element.

    NOTE(review): the `.css-*` class names are generated and change when
    Zhihu redeploys; expect empty results when they rotate.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) '
                      'Version/9.0 Mobile/13B143 Safari/601.1 '
    }

    url = "https://www.zhihu.com/hot"
    # timeout keeps a stalled server from hanging the whole updater run.
    r = requests.get(url, headers=headers, timeout=10)
    soup = BeautifulSoup(r.text, 'lxml')
    hotitem = soup.select('.css-3yucnr')
    context = [i.text for i in hotitem]

    hotnum = soup.select('.css-1iqwfle')
    # Raw string: '\d' in a plain literal is an invalid escape on modern Python.
    connum = [re.findall(r'\d+', j.text)[0] for j in hotnum]
    return context, connum


# 爬取百度热搜数据
# Scrape the Baidu realtime hot-search board (JS-rendered, needs Selenium)
def get_baidu_hot():
    """Scrape the Baidu realtime hot-search board with headless Chrome.

    Returns:
        (context, connum): parallel lists of hot-search titles and their
        heat-index texts, straight from the rendered DOM.
    """
    option = ChromeOptions()
    option.add_argument("--headless")  # run without a visible browser window
    # option.add_argument("--no-sandbox")  # needed when running as root on Linux
    s = Service("chromedriver.exe")
    browser = Chrome(options=option, service=s)
    try:
        url = "https://top.baidu.com/board?tab=realtime"
        browser.get(url)
        # Give the JS-rendered list a moment to settle before reading it.
        time.sleep(1)
        # c: title nodes
        c = browser.find_elements(By.XPATH, '//*[@id="sanRoot"]/main/div[2]/div/div[2]/div/div[2]/a/div[1]')
        context = [i.text for i in c]
        # d: heat-index nodes
        d = browser.find_elements(By.XPATH, '//*[@id="sanRoot"]/main/div[2]/div/div[2]/div/div[1]/div[2]')
        connum = [j.text for j in d]
        return context, connum
    finally:
        # quit() (not close()) also tears down the chromedriver process, and
        # the finally guarantees no driver leaks when the scrape raises.
        browser.quit()


def update_baidu_hotsearch():
    """Scrape Baidu hot search and insert the rows into table `baiduhot`.

    Best-effort: any failure is printed (not raised) and the DB resources
    are always released.
    """
    cursor = None
    conn = None
    try:
        context, connum = get_baidu_hot()
        print(f"{time.asctime()}开始更新数据")
        conn, cursor = get_conn()
        sql = "insert into baiduhot(dt,content,num) values(%s,%s,%s)"
        ts = time.strftime("%Y-%m-%d %X")
        # One batched executemany instead of a per-row execute loop; zip also
        # tolerates a title/heat length mismatch by truncating to the shorter.
        cursor.executemany(sql, [(ts, c, n) for c, n in zip(context, connum)])
        conn.commit()
        print(f"{time.asctime()}数据更新完毕")
    except Exception:
        # Narrowed from bare `except:` so Ctrl-C / SystemExit still propagate.
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)


def update_zhihu_hotsearch():
    """Scrape Zhihu hot list and insert the rows into table `zhihuhot`.

    Best-effort: any failure is printed (not raised) and the DB resources
    are always released.
    """
    cursor = None
    conn = None
    try:
        context, connum = get_zhihu_hot()
        print(f"{time.asctime()}开始更新数据")
        conn, cursor = get_conn()
        sql = "insert into zhihuhot(dt,content,num) values(%s,%s,%s)"
        ts = time.strftime("%Y-%m-%d %X")
        # One batched executemany instead of a per-row execute loop; zip also
        # tolerates a title/heat length mismatch by truncating to the shorter.
        cursor.executemany(sql, [(ts, c, n) for c, n in zip(context, connum)])
        conn.commit()
        print(f"{time.asctime()}数据更新完毕")
    except Exception:
        # Narrowed from bare `except:` so Ctrl-C / SystemExit still propagate.
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)


def update_weibo_hotsearch():
    """Scrape Weibo hot search and insert the rows into table `weibohot`.

    Best-effort: any failure is printed (not raised) and the DB resources
    are always released.
    """
    cursor = None
    conn = None
    try:
        context, connum = get_weibo_hot()
        print(f"{time.asctime()}开始更新数据")
        conn, cursor = get_conn()
        sql = "insert into weibohot(dt,content,num) values(%s,%s,%s)"
        ts = time.strftime("%Y-%m-%d %X")
        # One batched executemany instead of a per-row execute loop; zip also
        # tolerates a title/heat length mismatch by truncating to the shorter.
        cursor.executemany(sql, [(ts, c, n) for c, n in zip(context, connum)])
        conn.commit()
        print(f"{time.asctime()}数据更新完毕")
    except Exception:
        # Narrowed from bare `except:` so Ctrl-C / SystemExit still propagate.
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)

def update_bzhan_hotsearch():
    """Scrape the Bilibili ranking and insert the rows into table `bzhanhot`.

    Best-effort: any failure is printed (not raised) and the DB resources
    are always released.
    """
    cursor = None
    conn = None
    try:
        context, connum = get_bzhan_hot()
        print(f"{time.asctime()}开始更新数据")
        conn, cursor = get_conn()
        sql = "insert into bzhanhot(dt,content,num) values(%s,%s,%s)"
        ts = time.strftime("%Y-%m-%d %X")
        # One batched executemany instead of a per-row execute loop; zip also
        # tolerates a title/heat length mismatch by truncating to the shorter.
        cursor.executemany(sql, [(ts, c, n) for c, n in zip(context, connum)])
        conn.commit()
        print(f"{time.asctime()}数据更新完毕")
    except Exception:
        # Narrowed from bare `except:` so Ctrl-C / SystemExit still propagate.
        traceback.print_exc()
    finally:
        close_conn(conn, cursor)



def test():
    """Smoke test: print the scraped Bilibili titles and heat numbers."""
    context, connum = get_bzhan_hot()
    # Dead `flag` counter and commented-out code removed — the loops only
    # print each scraped value for a manual eyeball check.
    for title in context:
        print(title)
    for num in connum:
        print(num)


def pa():
    """Run all four hot-search updaters, in the fixed Baidu→Weibo→Zhihu→Bilibili order."""
    updaters = (
        update_baidu_hotsearch,
        update_weibo_hotsearch,
        update_zhihu_hotsearch,
        update_bzhan_hotsearch,
    )
    for run_update in updaters:
        run_update()

if __name__ == "__main__":
    # test()
    # update_baidu_hotsearch()
    # update_weibo_hotsearch()
    # update_zhihu_hotsearch()
    update_bzhan_hotsearch()


    # l = len(sys.argv)
    # if l == 1:
    #     s = """
    #     请输入参数
    #     参数说明，
    #     up_his 更新历史记录表
    #     up_hot 更新实时热搜
    #     up_det 更新详细表
    #     """
    #     print(s)
    # else:
    #     order = sys.argv[1]
    #     if order == "up_baidu":
    #         update_baidu_hotsearch()
    #     elif order == "up_weibo":
    #         update_weibo_hotsearch()
    #     elif order == "up_zhihu":
    #         update_zhihu_hotsearch()
