import csv
import time

import pymysql
from pandas import Series, DataFrame
from selenium import webdriver
from selenium.webdriver.common.by import By

Titles = []
links = []
# 链接数据库
conn = pymysql.connect(
    host="localhost",
    user="root", password='1213',
    database="zhihu",
    charset="utf8")

cursor = conn.cursor()

try:
    # 如果有表就删除
    cursor.execute("drop table zhihu")
except:
    pass
try:
    # 建立新的表
    sql = sql = """
    CREATE TABLE zhihu (
    id INT auto_increment PRIMARY KEY ,
    title VARCHAR (255) NOT NULL,
    link varchar(255) NOT NULL
    )ENGINE=innodb DEFAULT CHARSET=utf8;
"""
    cursor.execute(sql)
except:
    pass


# cursor.execute(sql)


def GetTitle():
    start = time.time()
    driver = webdriver.Chrome()
    driver.get('https://tophub.today/n/mproPpoq6O')
    for i in range(1, 51):
        header = driver.find_element_by_xpath(
            '//*[@id="page"]/div[2]/div[2]/div[1]/div[2]/div/div[1]/table/tbody/tr[' + str(i) + ']/td[2]/a').text
        # print("NO" + str(i) + "." + header)
        # a = '//*[@id="page"]/div[2]/div[2]/div[1]/div[2]/div/div[1]/table/tbody/tr[' + str(i) + ']/td[2]/a'

        link = driver.find_element_by_xpath(
            '//*[@id="page"]/div[2]/div[2]/div[1]/div[2]/div/div[1]/table/tbody/tr[' + str(
                i) + ']/td[2]/a').get_attribute('href')

        query = 'insert into zhihu(title, link) values(%s, %s)'
        title = header
        link1 = link
        values = (title, link1)
        cursor.execute(query, values)
        conn.commit()
        # 提交到数据库执行
        i = i + 1

        # 保存进数据库
        # ZhiHu = DataFrame({
        #     '日期': Titles,
        #     '步数': links
        # })
        # ZhiHu.to_csv("知乎热榜TOP50.csv")
    find = "select * from zhihu where title like '%%%%%s%%%%'"
    find = find % ("英国")
    cursor.execute(find)
    data = cursor.fetchall()
    print(data)

GetTitle()
cursor.close()
conn.close()
