# -*- coding: utf-8 -*-

"""
Datetime: 2020/06/01
Author: Zhang Yafei
Description: 
"""
import os
import time
import traceback

from DBUtils.PooledDB import PooledDB
from lxml import etree
from pandas import DataFrame
from psycopg2.errors import UniqueViolation
from selenium import webdriver
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from sqlalchemy import create_engine
from concurrent.futures import ThreadPoolExecutor


class DBPoolHelper(object):
    """Thin convenience wrapper around a DBUtils connection pool.

    Opens one pooled connection plus cursor at construction time and
    exposes simple execute/fetch helpers.  Supported backends:
    PostgreSQL, MySQL and SQLite (SQLite has no authentication, so
    user/password are ignored for it).
    """

    def __init__(self, dbname, user=None, password=None, db_type='postgressql', host='localhost', port=5432):
        """Create a pooled connection and cursor.

        :param dbname: database name (for sqlite: path to the .db file)
        :param user: database user (unused for sqlite)
        :param password: database password (unused for sqlite)
        :param db_type: 'postgresql' (the legacy misspelling 'postgressql'
            is still accepted for backward compatibility), 'mysql' or 'sqlite'
        :param host: database host
        :param port: database port (note: MySQL conventionally uses 3306,
            so pass it explicitly when db_type='mysql')
        :raises Exception: when db_type is not one of the supported values
        """
        if db_type in ('postgressql', 'postgresql'):
            import psycopg2
            pool = PooledDB(creator=psycopg2, host=host, port=port, user=user, password=password, database=dbname)
        elif db_type == 'mysql':
            import pymysql
            # Bug fix: this branch used to ignore the constructor arguments
            # and connect with hard-coded credentials (localhost/root/pwd/myDB).
            pool = PooledDB(pymysql, 5, host=host, user=user, passwd=password, db=dbname,
                            port=port)  # 5 is the minimum number of pooled connections
        elif db_type == 'sqlite':
            import sqlite3
            # Bug fix: the keyword used to be misspelled "datanase", which
            # sqlite3.connect() would reject with a TypeError.
            config = {"database": dbname}
            pool = PooledDB(sqlite3, maxcached=50, maxconnections=1000, maxusage=1000, **config)
        else:
            raise Exception('请输入正确的数据库类型, db_type="postgresql" or db_type="mysql" or db_type="sqlite"')
        self.conn = pool.connection()
        self.cursor = self.conn.cursor()

    def connect_close(self):
        """Close the cursor and release the pooled connection."""
        self.cursor.close()
        self.conn.close()

    def execute(self, sql, params=tuple()):
        """Execute a single statement and commit."""
        self.cursor.execute(sql, params)
        self.conn.commit()

    def execute_many(self, sql, params=tuple()):
        """Execute a statement once per parameter set and commit."""
        self.cursor.executemany(sql, params)
        self.conn.commit()

    def fetchone(self, sql, params=tuple()):
        """Run a query and return the first row (or None)."""
        self.cursor.execute(sql, params)
        data = self.cursor.fetchone()
        return data

    def fetchall(self, sql, params=tuple()):
        """Run a query and return all rows."""
        self.cursor.execute(sql, params)
        data = self.cursor.fetchall()
        return data

    def __del__(self):
        # Guard: __init__ may have raised before conn/cursor were assigned,
        # in which case connect_close() would raise AttributeError during GC.
        if hasattr(self, 'cursor'):
            self.connect_close()


class DingxiangyuanTopic(object):
    """Selenium crawler for one DXY forum board.

    Downloads the board's topic-listing pages (regular or "featured")
    and stores each page's raw HTML under topic_html/<board>/<page>.html.
    """

    def __init__(self, url, board, page_num, good_pages, headless: bool = True):
        """
        :param url: board listing URL
        :param board: board name, used as the output sub-directory
        :param page_num: number of regular listing pages to download
        :param good_pages: number of "featured topic" pages to download
        :param headless: run Chrome without a visible window when True
        """
        self.url = url
        self.board = board
        if not os.path.exists(f'topic_html/{self.board}'):
            os.makedirs(f'topic_html/{self.board}')
        self.page_num = page_num
        self.good_pages = good_pages
        if headless:
            chrome_options = Options()
            chrome_options.add_argument("--headless")
            chrome_options.add_argument('--disable-gpu')
            self.driver = webdriver.Chrome(options=chrome_options)
        else:
            self.driver = webdriver.Chrome()

    def download_html(self, page):
        """Save listing page *page* of the current board to disk.

        For pages after the first, navigates via the paginator's
        quick-jumper input before saving.  On a stale-element error the
        page is skipped after a short back-off (a later run will retry
        it, since get_filter_page() only skips files already on disk);
        any other error aborts the whole process, as before.
        """
        jumper = '//div[@class="ant-pagination-options-quick-jumper"]/input'
        try:
            if page != 1:
                # Jump to the requested page number via the quick-jumper box.
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.XPATH, jumper))
                )
                self.driver.find_element_by_xpath(jumper).click()
                self.driver.find_element_by_xpath(jumper).send_keys(page)
                self.driver.find_element_by_xpath(jumper).send_keys(Keys.ENTER)
            # Click //div[@id="area"]/div[3] — presumably toggles the full
            # topic list into view (TODO confirm against the live page) —
            # then wait for the topic wrapper elements to render.
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.XPATH, '//div[@id="area"]/div[3]'))
            )
            self.driver.find_element_by_xpath('//div[@id="area"]/div[3]').click()
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'wrapper___1YlSM'))
            )
            with open(f"topic_html/{self.board}/{page}.html", mode='w', encoding='utf-8') as f:
                f.write(self.driver.page_source)
            print(f"{self.board}\t{page}\t下载成功")

        except StaleElementReferenceException:
            # Transient DOM refresh; back off and move on.
            time.sleep(1)

        except Exception as e:
            print(e)
            exit()  # unrecoverable (timeout / layout change): stop the crawl

    def get_filter_page(self):
        """Return the set of page numbers in 1..page_num not yet downloaded
        for this board, judged by the .html files already on disk."""
        wanted = set(range(1, self.page_num + 1))
        saved = os.listdir(f'topic_html/{self.board}')
        if saved:
            done = {int(name.replace(".html", "")) for name in saved}
            return wanted - done
        return wanted

    def start(self):
        """Download every regular listing page that is still missing."""
        self.driver.get(self.url)
        filter_pages = self.get_filter_page()
        print(f'{self.board}\t还剩{len(filter_pages)}页')
        if not filter_pages:
            print(f'{self.board}\t下载完成')
            return
        for page in filter_pages:
            self.download_html(page)
            time.sleep(0.5)

    def start_good_topic(self):
        """Switch the board view to "featured" topics and download
        good_pages pages of them."""
        self.driver.get(self.url)
        WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located(
                (By.XPATH, '//div[@id="area"]/div[4]'))
        )
        # Open the filter dropdown and pick its second entry — presumably
        # the "featured topics" view (TODO confirm against the live page).
        self.driver.find_element_by_xpath('//div[@id="area"]/div[4]').click()
        self.driver.find_element_by_xpath('//div[@id="area"]/div[6]/div/div/ul/li[2]').click()
        if self.good_pages > 1:
            for page in range(1, self.good_pages + 1):
                time.sleep(1)
                self.download_html(page)
        else:
            self.download_html(page=1)

    # NOTE(review): the WebDriver is never quit; consider driver.quit()
    # when a crawl finishes so Chrome processes do not accumulate.


def pandas_db_helper(database_engine='postgresql://postgres:0000@127.0.0.1:5432/dingxiangyuan'):
    """Return a raw SQLAlchemy connection for the given engine URL.

    The default points at the local 'dingxiangyuan' PostgreSQL database
    (the hard-coded URL this helper always used); pass any SQLAlchemy
    URL to target another database, e.g.:

        'postgresql://postgres:0000@127.0.0.1:5432/xiaomuchong'
        "mysql+pymysql://root:0000@127.0.0.1:3306/srld?charset=utf8mb4"
        "sqlite:///sqlite3.db"

    :param database_engine: SQLAlchemy database URL
    :return: an open SQLAlchemy Connection (caller is responsible for closing)
    """
    engine = create_engine(database_engine)
    conn = engine.connect()
    return conn


def get_db_conn():
    """Build and return a pooled PostgreSQL helper for the local
    'dingxiangyuan' database using the project's default credentials."""
    return DBPoolHelper(db_type='postgressql', dbname='dingxiangyuan',
                        user='postgres', password='0000',
                        host='localhost', port='5432')


def get_file_list(dir_path):
    """Collect every file path under *dir_path* (forward-slash separated)
    and, when history.txt exists, subtract the paths recorded there so
    already-processed files are skipped."""
    all_files = {
        os.path.join(base, name).replace('\\', '/')
        for base, _dirs, names in os.walk(dir_path)
        for name in names
    }
    if not os.path.exists("history.txt"):
        return all_files
    with open('history.txt', encoding='utf-8') as f:
        done = {line.strip() for line in f}
    remaining = all_files - done
    print(f'共\t{len(all_files)}\t已解析{len(done)}\t还剩\t{len(remaining)}')
    return remaining


def read_html(file):
    """Return the full text content of *file*, decoded as UTF-8."""
    with open(file, encoding='utf-8') as fp:
        content = fp.read()
    return content


def good_html_parse():
    """Parse the saved "featured topic" listing pages under topic_html/
    and upsert one row per topic into the PostgreSQL ``topics`` table.

    Every topic parsed here is flagged top=1 (these pages only list
    featured topics).  Each successfully parsed file is appended to
    history.txt so it is skipped on the next run; per-file failures are
    logged and skipped without aborting the whole batch.
    """
    postgres = get_db_conn()
    files = get_file_list(dir_path="topic_html")
    insert_sql = 'insert into topics(topic_url, topic_title, board_id, board_name, author_name, top, author_url, post_time, reply_num, click_num) VALUES (%s, %s, %s, %s, %s,%s, %s,%s,%s, %s) ON conflict(topic_url) DO UPDATE SET top=EXCLUDED.top'
    # "with" guarantees history.txt is flushed and closed even if the loop
    # is aborted by an uncaught error (the old code leaked the handle then).
    with open("history.txt", mode='a', encoding='utf-8') as history_file:
        for file in files:
            try:
                text = read_html(file)
                # Path layout is topic_html/<board_name>/<page>.html.
                board_name = file.split("/")[1]
                board_id = BOARD_ID_MAP[board_name]
                html = etree.HTML(text)
                for item in html.xpath('//div[@class="wrapper___1YlSM"]'):
                    item_dict = {'board_id': board_id, 'board_name': board_name}
                    topic_title = item.xpath(
                        'div[@class="mainContentWrapper___bLCCa"]//a[contains(@class, "subject___2unSX")]/text()')
                    item_dict['topic_title'] = topic_title[0] if topic_title else ''
                    topic_url = item.xpath(
                        'div[@class="mainContentWrapper___bLCCa"]//a[contains(@class, "subject___2unSX")]/@href')
                    if not topic_url:
                        # topic_url is the conflict key; a row without it is useless.
                        continue
                    item_dict['topic_url'] = "http://www.dxy.cn/bbs/topic/" + topic_url[0].replace(
                        "/bbs/newweb/pc/post/", '')
                    author_name = item.xpath('div[@class="commonWrapper___16Bhg hostWrapper___3yrSU"]/a/text()')
                    item_dict['author_name'] = author_name[0] if author_name else ''
                    author_url = item.xpath('div[@class="commonWrapper___16Bhg hostWrapper___3yrSU"]/a/@href')
                    item_dict['author_url'] = "https://www.dxy.cn" + author_url[0] if author_url else ''
                    item_dict['post_time'] = \
                        item.xpath('div[@class="commonWrapper___16Bhg hostWrapper___3yrSU"]/div[1]/text()')[0]
                    item_dict['reply_num'] = \
                        item.xpath('div[@class="commonWrapper___16Bhg replyWrapper___BBHy2"]/div[1]/text()')[0]
                    item_dict['click_num'] = \
                        item.xpath('div[@class="commonWrapper___16Bhg replyWrapper___BBHy2"]/div[2]/text()')[0]
                    # Counts like "1.2万" become plain numbers (万 = 10,000).
                    if '万' in item_dict['click_num']:
                        item_dict['click_num'] = float(item_dict['click_num'].replace('万', '')) * 10000
                    if '万' in item_dict['reply_num']:
                        item_dict['reply_num'] = float(item_dict['reply_num'].replace('万', '')) * 10000
                    item_dict['top'] = 1  # everything on these pages is featured
                    params = (
                        item_dict['topic_url'], item_dict['topic_title'], item_dict['board_id'],
                        item_dict["board_name"], item_dict["author_name"],
                        item_dict['top'], item_dict['author_url'], item_dict["post_time"],
                        item_dict['reply_num'], item_dict['click_num'])
                    postgres.execute(insert_sql, params=params)
                history_file.write(f"{file}\n")
                print(f'{file} 解析完成 数据插入成功')
            except UniqueViolation:
                print("topic_url重复")
                continue
            except Exception:
                print(file)
                traceback.print_exc()
                continue


def html_parse():
    """Parse the saved regular listing pages under topic_html/ and upsert
    one row per topic into the PostgreSQL ``topics`` table.

    Unlike good_html_parse(), the ``top`` flag is derived per topic from
    the presence of a "blockTopTag" badge (presumably the pinned-topic
    marker — confirm against the live markup).  Each successfully parsed
    file is appended to history.txt so it is skipped on the next run;
    per-file failures are logged and skipped without aborting the batch.
    """
    # Dead code removed: this function used to open an unused SQLAlchemy
    # connection via pandas_db_helper() and build a never-consumed
    # data_list for a commented-out DataFrame.to_sql path.
    postgres = get_db_conn()
    files = get_file_list(dir_path="topic_html")
    insert_sql = 'insert into topics(topic_url, topic_title, board_id, board_name, author_name, top, author_url, post_time, reply_num, click_num) VALUES (%s, %s, %s, %s, %s,%s, %s,%s,%s, %s) ON conflict(topic_url) DO UPDATE SET top=EXCLUDED.top'
    # "with" guarantees history.txt is flushed and closed even if the loop
    # is aborted by an uncaught error (the old code leaked the handle then).
    with open("history.txt", mode='a', encoding='utf-8') as history_file:
        for file in files:
            try:
                text = read_html(file)
                # Path layout is topic_html/<board_name>/<page>.html.
                board_name = file.split("/")[1]
                board_id = BOARD_ID_MAP[board_name]
                html = etree.HTML(text)
                for item in html.xpath('//div[@class="wrapper___1YlSM"]'):
                    item_dict = {'board_id': board_id, 'board_name': board_name}
                    topic_title = item.xpath(
                        'div[@class="mainContentWrapper___bLCCa"]//a[contains(@class, "subject___2unSX")]/text()')
                    item_dict['topic_title'] = topic_title[0] if topic_title else ''
                    topic_url = item.xpath(
                        'div[@class="mainContentWrapper___bLCCa"]//a[contains(@class, "subject___2unSX")]/@href')
                    if not topic_url:
                        # topic_url is the conflict key; a row without it is useless.
                        continue
                    item_dict['topic_url'] = "http://www.dxy.cn/bbs/topic/" + topic_url[0].replace(
                        "/bbs/newweb/pc/post/", '')
                    author_name = item.xpath('div[@class="commonWrapper___16Bhg hostWrapper___3yrSU"]/a/text()')
                    item_dict['author_name'] = author_name[0] if author_name else ''
                    author_url = item.xpath('div[@class="commonWrapper___16Bhg hostWrapper___3yrSU"]/a/@href')
                    item_dict['author_url'] = "https://www.dxy.cn" + author_url[0] if author_url else ''
                    item_dict['post_time'] = \
                        item.xpath('div[@class="commonWrapper___16Bhg hostWrapper___3yrSU"]/div[1]/text()')[0]
                    item_dict['reply_num'] = \
                        item.xpath('div[@class="commonWrapper___16Bhg replyWrapper___BBHy2"]/div[1]/text()')[0]
                    item_dict['click_num'] = \
                        item.xpath('div[@class="commonWrapper___16Bhg replyWrapper___BBHy2"]/div[2]/text()')[0]
                    if item.xpath(
                            'div[@class="mainContentWrapper___bLCCa"]//span[contains(@class, "blockTopTag___3edIp")]'):
                        item_dict['top'] = 1
                    else:
                        item_dict['top'] = 0
                    params = (
                        item_dict['topic_url'], item_dict['topic_title'], item_dict['board_id'],
                        item_dict["board_name"], item_dict["author_name"],
                        item_dict['top'], item_dict['author_url'], item_dict["post_time"],
                        item_dict['reply_num'], item_dict['click_num'])
                    postgres.execute(insert_sql, params=params)
                history_file.write(f"{file}\n")
                print(f'{file} 解析完成 数据插入成功')
            except UniqueViolation:
                print("topic_url重复")
                continue
            except Exception:
                print(file)
                traceback.print_exc()
                continue


def spider_topic(kwarg):
    """Crawl all missing regular listing pages for one board.

    :param kwarg: dict of DingxiangyuanTopic constructor arguments
    """
    crawler = DingxiangyuanTopic(**kwarg)
    crawler.start()


def spider_good_topic(kwarg):
    """Crawl the "featured topic" listing pages for one board.

    :param kwarg: dict of DingxiangyuanTopic constructor arguments
    """
    crawler = DingxiangyuanTopic(**kwarg)
    crawler.start_good_topic()


if __name__ == '__main__':
    # Per-board crawl configuration: each entry holds the constructor
    # arguments of DingxiangyuanTopic (board URL, output folder name,
    # number of regular pages, number of "featured" pages).
    board_map_list = [
        # {
        #     "url": "https://www.dxy.cn/bbs/newweb/pc/board/47",
        #     "board": '心血管',
        #     "page_num": 1000,
        #     "good_pages": 9,
        # },
        # Clinical medicine
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/58",
            'board': "呼吸胸外",
            "page_num": 1000,
            "good_pages": 9,
        },
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/112",
            'board': "危重急救",
            "page_num": 1000,
            "good_pages": 23,
        },

        # Study and exams
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/45",
            'board': "论文写作投稿",
            "page_num": 1000,
            "good_pages": 5,
        },
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/82",
            'board': "临床执考",
            "page_num": 1000,
            "good_pages": 10,
        },

        # Information exchange
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/6",
            'board': "检索知识",
            "page_num": 977,
            "good_pages": 5,
        },

        # Pharmacy and lab work
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/114",
            'board': "新药信息",
            "page_num": 1000,
            "good_pages": 5,
        },
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/199",
            'board': "动物与组织学技术",
            "page_num": 1000,
            "good_pages": 4,
        },

        # Basic and public health
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/73",
            'board': "生物信息学",
            "page_num": 788,
            "good_pages": 6,
        },
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/126",
            'board': "预防医学",
            "page_num": 716,
            "good_pages": 1,
        },

        # Leisure area
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/15",
            'board': "心情驿站",
            "page_num": 1000,
            "good_pages": 24,
        },

        # Medical humanities
        {
            "url": "https://www.dxy.cn/bbs/newweb/pc/board/144",
            'board': "卫生事业与医院管理",
            "page_num": 502,
            "good_pages": 8,
        },
        # {
        #     "url": "https://www.dxy.cn/bbs/newweb/pc/board/53",
        #     'board': '卫生法律人文',
        #     "page_num": 796,
        #     "good_pages": 18,
        # },
        # {
        #     "url": "https://www.dxy.cn/bbs/newweb/pc/board/239",
        #     'board': "医学哲学和医学史",
        #     "page_num": 44,
        #     "good_pages": 1
        # },
    ]
    # Maps board folder name -> numeric board_id stored in the topics table.
    # NOTE(review): this is defined only when running as a script, yet
    # good_html_parse()/html_parse() read it as a global — importing this
    # module and calling them directly would raise NameError.
    BOARD_ID_MAP = {
        '心血管': 1,
        '肿瘤医学': 2,
        '呼吸胸外': 3,
        '危重急救': 4,

        '卫生法律人文': 5,
        '医学哲学和医学史': 6,
        '卫生事业与医院管理': 7,

        '新药信息': 8,
        '动物与组织学技术': 9,
        '生物信息学': 10,
        '预防医学': 11,

        '论文写作投稿': 12,
        '临床执考': 13,

        '检索知识': 14,

        '心情驿站': 15,
    }

    # Alternative run modes (kept for reference): sequential or threaded
    # crawling of regular pages, parsing them, then featured pages.
    # for kwarg in board_map_list:
    #     dingxiangyuan = DingxiangyuanTopic(**kwarg)
    #     dingxiangyuan.start()

    # with ThreadPoolExecutor() as pool:
    #     pool.map(spider_topic, board_map_list)

    # html_parse()

    # for kwarg in board_map_list:
    #     dingxiangyuan = DingxiangyuanTopic(**kwarg)
    #     dingxiangyuan.start_good_topic()

    # with ThreadPoolExecutor() as pool:
    #     pool.map(spider_good_topic, board_map_list)

    good_html_parse()
