import urllib3
from bs4 import BeautifulSoup
import sqlite3
import csv
import os
import tkinter as tk
import tkinter.ttk as ttk
import threading
import time

# Shared HTTP client: a single PoolManager handles connection pooling and
# thread safety for all requests made by this script.
http = urllib3.PoolManager()
# SQLite database file (created in the working directory on first run)
db_name = "./vjshi.db"

is_crawl = False     # True while the periodic crawl loop is active
crawl_thread = None  # threading.Timer for the next scheduled crawl; None until loop_crawl schedules one
interval = 60        # seconds between crawl passes
crawl_times = 1      # 1-based counter of crawl passes, used in log messages
log_text = None      # tk.Text log widget; set by open_frame()
log_index = 1        # count of log lines written (incremented by log(), never read elsewhere in this file)

def get_db_connection():
    """Open and return a fresh connection to the crawl database."""
    connection = sqlite3.connect(db_name)
    return connection


def crawl_data():
    """Fetch the vjshi.com sales-ranking page and store each entry.

    Returns a two-element list: [rows inserted, rows that failed to
    insert — typically primary-key duplicates from earlier passes].
    """
    response = http.request('GET', 'https://www.vjshi.com/ranking/sales')  # plain GET request
    html = response.data.decode('utf-8')
    soup = BeautifulSoup(html, 'html.parser')
    success_count = 0
    fail_count = 0
    db_connection = get_db_connection()
    # print(soup.prettify())
    # Container holding the ranking entries. NOTE(review): "css-wposug"
    # looks like a generated class name — this selector will break when
    # the site regenerates its CSS; confirm periodically.
    link_list = soup.find('div', attrs={"class": "css-wposug"})
    for current_div in link_list.children:
        if "div" == current_div.name:  # skip non-div nodes (e.g. whitespace text) between entries
            element_a = current_div.find("a")
            detail_link = element_a.attrs['href']
            elements_in_a = [a for a in element_a.children]
            # The anchor appears in two layouts; pick the child divs that
            # hold the image block and the title block for each case.
            if len(elements_in_a) < 2:
                div_in_a = [div for div in elements_in_a[0].children]
                img_div = div_in_a[0]
                title_div = div_in_a[1]
            else:
                div_in_a = [div for div in elements_in_a[1].children]
                img_div = div_in_a[1]
                title_div = div_in_a[2]
            p_tags = img_div.find_all("p", attrs={"class": "chakra-text"})
            search_keyword = "无"  # default ("none") when no search-keyword <p> is present
            if len(p_tags) > 3:
                search_keyword = img_div.find_all("p", attrs={"class": "chakra-text"})[3].text
                # Strip the "搜索词：" ("search term:") label prefix.
                search_keyword = search_keyword.replace("搜索词：", "")
            img_url = img_div.find("img").attrs['src']
            title = title_div.text
            # print(title + "\t" + search_keyword + "\t" + img_url + "\t" + detail_link)
            try:
                insert_data(db_connection, detail_link, title, search_keyword, img_url)
                success_count = success_count + 1
            except Exception as e:
                # Counted as "already exists" in the caller's log message,
                # though any DB error lands here too.
                fail_count = fail_count + 1
    db_connection.close()
    return [success_count, fail_count]


def create_table():
    """Create the `crawl_record` table if it does not already exist.

    `id` stores the detail-page URL and is the primary key, which is what
    makes repeated crawl passes skip already-stored entries.
    """
    sql = """
        CREATE TABLE IF NOT EXISTS `crawl_record`(
           `id` VARCHAR(100) PRIMARY KEY,
           `title` VARCHAR(100),
           `keyword` VARCHAR(100),
           `image_url` VARCHAR(100),
           `detail_page_url` VARCHAR(100)
        )
    """
    db_connection = get_db_connection()
    try:
        db_connection.execute(sql)
    finally:
        # Close even when the DDL fails so the connection is not leaked.
        db_connection.close()


def insert_data(db_connection, detail_page_url, title, keyword, image_url):
    """Insert one crawl result into `crawl_record`.

    :param db_connection: open sqlite3 connection (caller owns/closes it)
    :param detail_page_url: detail-page URL; stored as the primary key
        `id` (deduplicates repeat crawls) and also in the
        `detail_page_url` column, which was previously always left NULL
    :param title: video title
    :param keyword: search keyword ("无" when the page showed none)
    :param image_url: thumbnail image URL
    :raises sqlite3.IntegrityError: if this URL is already stored
    """
    sql = """
        insert into crawl_record (id, title, keyword, image_url, detail_page_url)
        values (?, ?, ?, ?, ?)
    """
    db_connection.execute(sql, (detail_page_url, title, keyword, image_url, detail_page_url))
    db_connection.commit()
    


def export_data():
    """Export every crawl record to `export.csv` in the working directory.

    Columns: title, keyword, thumbnail URL, detail-page URL (stored in
    `id`). Failures are reported through the GUI log instead of raising,
    so this stays safe as a button callback.
    """
    sql = """
        select title, keyword, image_url, id from crawl_record order by id
    """
    headers = ["标题", "关键词", "图片缩略图", "详情页面"]
    output_path = os.getcwd() + "/export.csv"
    db_connection = get_db_connection()
    try:
        data = db_connection.execute(sql).fetchall()
        # Mode "w" truncates an existing file, so no separate delete is
        # needed; newline="" is required by the csv module to avoid blank
        # rows on Windows.
        with open(output_path, 'w', encoding='utf-8', newline='') as output:
            writer = csv.writer(output)
            writer.writerow(headers)
            writer.writerows(data)
        log("共"+ str(len(data)) +"条数据，已保存到：" + output_path)
    except Exception as e:
        # The original bare except silently swallowed every failure (with a
        # comment copy-pasted from insert_data); at least surface it.
        log("导出失败: " + str(e))
    finally:
        db_connection.close()


def loop_crawl():
    """Run one crawl pass, then re-schedule itself while crawling is enabled."""
    global crawl_times, is_crawl, crawl_thread
    log("开始第" + str(crawl_times) + "次爬取.")
    inserted, existed = crawl_data()
    log("第" + str(crawl_times) + "次爬取完成, 插入 " + str(inserted) + " 条新数据, " + str(existed) + " 条数据已存在")
    crawl_times += 1
    if not is_crawl:
        return
    # Schedule the next pass and keep the Timer handle so stop_crawl can
    # cancel it.
    crawl_thread = threading.Timer(interval=interval, function=loop_crawl)
    crawl_thread.start()

def aaa():
    # NOTE(review): leftover debug helper — never called anywhere in this
    # file; candidate for removal.
    print(111)

def start_crawl():
    """Start the periodic crawl loop in a background thread (idempotent)."""
    global is_crawl
    if is_crawl:  # idiomatic truthiness test instead of `== True`
        log("已经在爬数据了...")
        return
    log("开始爬数据...")
    is_crawl = True
    # Run the first pass off the Tk main loop so the GUI stays responsive;
    # subsequent passes re-schedule themselves via threading.Timer in
    # loop_crawl. (The original also declared `global crawl_thread` and
    # bound the thread to an unused local — both removed.)
    threading.Thread(target=loop_crawl).start()


def stop_crawl():
    """Stop the crawl loop, cancelling any pending scheduled pass."""
    global is_crawl, crawl_thread
    if not is_crawl:
        log("已经停止了.")
        return
    is_crawl = False
    # crawl_thread stays None until loop_crawl schedules the first Timer;
    # the unguarded cancel() raised AttributeError when "结束" was pressed
    # during the very first pass.
    if crawl_thread is not None:
        crawl_thread.cancel()
    log("停止爬数据成功.")


def open_frame():
    """Build the Tk main window (control buttons + scrolling log) and run its main loop."""
    global log_text
    root = tk.Tk()
    root.geometry('500x400+500+300')
    root.title("爬虫")
    ttk.Button(root, text='开始', command=start_crawl).pack()
    ttk.Button(root, text='结束', command=stop_crawl).pack()
    ttk.Button(root, text='导出csv文件', command=export_data).pack()
    # Give the scrollbar an explicit parent instead of relying on the
    # implicit default root.
    scrollbar = tk.Scrollbar(root, orient='vertical')
    scrollbar.pack(side='right', fill='y')
    log_text = tk.Text(root, width=200, height=100, yscrollcommand=scrollbar.set)
    # The original never wired the scrollbar back to the widget, so
    # dragging it did nothing; connect both directions.
    scrollbar.config(command=log_text.yview)
    log_text.pack()
    root.mainloop()


def log(msg):
    """Append *msg* as one line to the GUI log area (no-op before the window exists).

    NOTE(review): this is called from Timer/worker threads, and Tkinter is
    not guaranteed thread-safe — consider routing messages through a
    queue polled with `after()` on the main loop.
    """
    global log_text, log_index
    if log_text is None:
        return
    log_text.insert("end", msg + "\n")
    # Jump to the end instead of the original yview_scroll(1, 'units'),
    # which only advanced one fixed unit per call and fell behind whenever
    # a message spanned more than one display line.
    log_text.see("end")
    log_index = log_index + 1

if __name__ == '__main__':
    # Ensure the storage table exists, then block in the Tk main loop.
    create_table()
    open_frame()
