#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
@author: caijj
@version: V1.0
@file: national_house_price_page.py
@time: 2022/02/10
"""
import os
import random
import re
import uuid
from time import sleep
import pandas
import pandas as pd
from lxml import etree
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from sqlalchemy import create_engine
from sqlalchemy.exc import ResourceClosedError
from common.globalvar import GlobalVar
from config.conf import cm
from tools import timeutil, fileutil
from tools.database_tool import DataBaseTool
from tools.decorator import update_flag
from tools.readyaml import ReadYaml

# City landing page on creprice.cn; {city} is the site's own city code (e.g. 'su')
root_url = 'https://www.creprice.cn/urban/{city}.html'
# Base used to absolutize the relative per-district links scraped off the city page
area_estate_root_url = 'https://www.creprice.cn'
# Per-city DB table name templates: district ranking / estate ranking
area_house_price_table_name = 'mega_{city}_area_house_price'
area_estate_house_price_table_name = 'mega_{city}_area_estate_house_price'
# Per-city CSV staging file name templates ("district price ranking" /
# "estate price ranking")
area_house_price_file_name = '{city}区房价排行榜.csv'
area_estate_house_price_file_name = '{city}楼盘小区房价排行榜.csv'


class NationalHousePricePage:
    """Scrape district and estate house-price rankings from creprice.cn.

    Results are appended to CSV staging files and (via the insert_* methods)
    loaded into SQL Server. Pages that trip the site's manual-verification
    wall are retried after a pause until they scrape cleanly.
    """

    # Maps internal district names to the labels shown on the site, keyed by
    # the site's city code. Empty for cities with no renamed districts.
    _AREA_ALIASES = {
        'nt': {'南通经济技术开发区': '南通经开区'},
        'cq': {'秀山土家族苗族自治县': '秀山县',
               '酉阳土家族苗族自治县': '酉阳县',
               '彭水苗族土家族自治县': '彭水县'},
        'yz': {'江都市': '江都区'},
    }

    @staticmethod
    def get_table_content(tree, table_id):
        """Locate a table element via XPath and return its serialized HTML.

        :param tree: lxml HTML element tree of the scraped page.
        :param table_id: XPath expression selecting the <table> element.
        :return: table markup as str (etree.tostring yields bytes; decoded
            so pandas.read_html can consume it).
        :raises ValueError: if the XPath matched nothing (previously an
            opaque IndexError).
        """
        nodes = tree.xpath(table_id)
        if not nodes:
            raise ValueError('no table matched xpath: %s' % table_id)
        return etree.tostring(nodes[0], encoding='utf8').decode()

    @staticmethod
    def _parse_trend_list(tree, title):
        """Return the price-trend codes for the ranking table under *title*.

        The site colors the price cell by direction; map the CSS class to a
        code: 'green' -> '-1' (down), 'red' -> '1' (up), 'gray6' -> '0'
        (flat). Unknown classes are passed through unchanged.
        """
        css_classes = tree.xpath(
            "//h3[contains(text(),'" + title + "')]/parent::div"
            "//following-sibling::div//tbody[@id='order_f']/tr/td[3]/@class")
        return ['-1' if c == 'green' else '1' if c == 'red'
                else '0' if c == 'gray6' else c for c in css_classes]

    @staticmethod
    def firefox_enter_target_page(url='https://www.creprice.cn/urban/su.html'):
        """Open *url* in a maximized Firefox window and return the driver."""
        firefox_options = Options()
        # firefox_options.headless = True  # enable for headless runs
        web_driver = webdriver.Firefox(options=firefox_options)
        web_driver.maximize_window()
        web_driver.get(url)
        return web_driver

    @staticmethod
    def chrome_enter_target_page(url):
        """Open *url* in a headless Chrome instance and return the driver."""
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        web_driver = webdriver.Chrome(options=chrome_options)
        web_driver.maximize_window()
        web_driver.get(url)
        return web_driver

    @staticmethod
    def quit(web_driver):
        """Quit the browser (no-op for None) and pause to let it shut down."""
        if web_driver is not None:
            web_driver.quit()
            sleep(5)

    def get_area_house_price(self, city_info, csv_name, data_table_name):
        """Scrape the per-district price ranking for one city into *csv_name*.

        Retries forever (with a 30 s pause) when the page fails, e.g. when a
        manual CAPTCHA wall appears.

        :param city_info: (site_city_code, district_list); only the code is
            used here.
        :param csv_name: CSV file to append rows to (header row included).
        :param data_table_name: target DB table; currently unused because the
            insert call is commented out.
        """
        url = root_url.format(city=city_info[0])
        while True:  # iterative retry (was unbounded recursion)
            web_driver = None  # bug fix: previously unbound in the except path
            try:
                web_driver = self.chrome_enter_target_page(url)
                tree = etree.HTML(web_driver.page_source)
                tb_xpath = '//h3[contains(text(),"各区市县房价")]/parent::div//following-sibling::div[@class="ucont1"]/table[' \
                           '@class="table-style2"] '
                tb = self.get_table_content(tree, tb_xpath)
                trend_list = self._parse_trend_list(tree, '各区市县房价')
                df = pandas.read_html(tb, encoding='utf-8', header=0)[0]
                df['id'] = df.apply(lambda _: uuid.uuid4(), axis=1)
                df['create_time'] = timeutil.dt_strptime("%Y-%m-%d %H%M%S")
                df['trend'] = trend_list
                # Report 虎丘区 under its marketing name 高新区
                df.replace('虎丘区', '高新区', inplace=True)
                df.to_csv(csv_name, mode="a",
                          columns=['id', '排名', '行政区', '平均单价(元/㎡)', 'create_time', 'trend'],
                          encoding="utf-8",
                          header=True, index=False)
                # self.insert_data(csv_name, table_name)
                self.quit(web_driver)
                return
            except Exception as e:
                self.quit(web_driver)
                print(e)
                print('区市县房价排行榜需手动验证 + 访问地址为%s' % url)
                sleep(30)

    def get_area_estate_house_link(self, city_info):
        """Resolve the estate-ranking page URL for every district of a city.

        :param city_info: (site_city_code, district_list); district_list[0]
            is the city-level entry and is skipped.
        :return: (site_city_code, {original_district_name: absolute_url}).
        """
        city_code, city_area_list = city_info[0], city_info[1]
        aliases = self._AREA_ALIASES.get(city_code, {})
        # Names as displayed on the page, used for the text-match XPath below
        display_names = [aliases.get(area, area) for area in city_area_list]
        while True:  # iterative retry (was unbounded recursion)
            web_driver = None
            try:
                web_driver = self.chrome_enter_target_page(root_url.format(city=city_code))
                sleep(1.5)
                tree = etree.HTML(web_driver.page_source)
                city_area_link = []
                for city_area in display_names[1:]:
                    area_link = tree.xpath("//div[@class='area-select']//span[contains(text(),"
                                           "'" + city_area + "')]/parent::a//@href")[0]
                    city_area_link.append(area_estate_root_url + area_link)
                web_driver.quit()
                # Keyed by the ORIGINAL district names, matching the input list
                return city_code, dict(zip(city_area_list[1:], city_area_link))
            except Exception as e:
                # bug fix: the browser was previously leaked on failure
                self.quit(web_driver)
                print(e)
                print(('区域楼盘排行榜link异常 + 地址：%s' % root_url.format(city=city_code)))
                sleep(20)

    def get_area_estate_house_price(self, city, area_dict, create_time, csv_name, data_table_name, index=0):
        """Scrape the estate ranking for each district and append to *csv_name*.

        On failure the current district is retried after 15 s; *index* tracks
        how far the scrape has progressed so finished districts are not redone.

        :param city: site city code (unused here; kept for interface parity).
        :param area_dict: {district_name: ranking_page_url}.
        :param create_time: timestamp stamped onto every row.
        :param csv_name: CSV appended to WITHOUT a header row.
        :param data_table_name: target DB table; insert call is commented out.
        :param index: district offset to resume from (default 0).
        """
        areas = list(area_dict.keys())
        while index < len(areas):  # iterative retry (was unbounded recursion)
            area = areas[index]
            web_driver = None
            try:
                web_driver = self.chrome_enter_target_page(area_dict[area])
                # bug fix: sleep BEFORE reading page_source so the page can
                # finish loading (matches get_area_estate_house_link)
                sleep(1.5)
                tree = etree.HTML(web_driver.page_source)
                tb_xpath = '//h3[contains(text(),"楼盘小区房价")]/parent::div//following-sibling::div[' \
                           '@class="ucont1"]/table[@class="table-style2"] '
                tb = self.get_table_content(tree, tb_xpath)
                trend_list = self._parse_trend_list(tree, '楼盘小区房价')
                df = pandas.read_html(tb, encoding='utf-8', header=0)[0]
                df['id'] = df.apply(lambda _: uuid.uuid4(), axis=1)
                df['create_time'] = create_time
                df['area'] = area
                df['trend'] = trend_list
                if area == '相城区':  # project was renamed on the site
                    df.replace('万科安元路项目', '万科锦上和风华苑', inplace=True)
                df.to_csv(csv_name, mode="a",
                          columns=['id', '序号', '楼盘名称', '单价(元/㎡)', 'create_time', 'area', 'trend'],
                          encoding="utf-8",
                          header=False,
                          index=False)
                self.quit(web_driver)
                sleep(random.randint(5, 10))  # throttle to avoid the CAPTCHA wall
                index += 1
            except Exception as e:
                self.quit(web_driver)
                print(e)
                print('市区楼盘小区房价排行榜需手动验证 + 访问地址为%s' % (area_dict[area]))
                sleep(15)
        # self.insert_estate_house_price_data(csv_name, data_table_name)

    @staticmethod
    def _load_csv_to_db(csv_name, data_table_name, columns, dedupe_cols, header):
        """Shared loader: read *csv_name*, dedupe, and write *data_table_name*.

        NOTE(review): DB credentials are hard-coded below — move to config.
        The if_exists='fail' / DELETE / append dance is deliberate: when the
        table already exists, to_sql raises ValueError; the DELETE is issued
        through read_sql_query, which raises ResourceClosedError because a
        DELETE returns no rows, and the append runs inside that handler.

        :param columns: column names to assign to the CSV frame.
        :param dedupe_cols: subset used for drop_duplicates (keep first).
        :param header: header arg for read_csv (0 = has header, None = none).
        :raises FileNotFoundError: if *csv_name* does not exist.
        """
        engine = create_engine('mssql+pymssql://base:base@192.168.106.21:1433/JRZF_BASE?charset=utf8')
        if not os.path.exists(csv_name):
            raise FileNotFoundError("配置文件%s不存在！" % csv_name)
        df = pd.read_csv(csv_name, sep=',', header=header)
        df.columns = columns
        df.drop_duplicates(subset=dedupe_cols, keep='first', inplace=True)
        try:
            df.to_sql(data_table_name, engine, index=False, if_exists='fail')
        except ValueError:
            try:
                pd.read_sql_query("delete from {}".format(data_table_name), con=engine)
            except ResourceClosedError:
                df.to_sql(data_table_name, engine, index=False, if_exists='append')

    @staticmethod
    @update_flag()
    def insert_area_house_price_data(csv_name, data_table_name):
        """Load the district-ranking CSV (which has a header row) into the DB."""
        NationalHousePricePage._load_csv_to_db(
            csv_name, data_table_name,
            ['id', 'ranking', 'area', 'average_unit_price', 'create_time', 'trend'],
            ['ranking', 'area'], header=0)

    @staticmethod
    @update_flag()
    def insert_estate_house_price_data(csv_name, data_table_name):
        """Load the estate-ranking CSV (headerless) into the DB."""
        NationalHousePricePage._load_csv_to_db(
            csv_name, data_table_name,
            ['id', 'serial_number', 'estate_name', 'unit_price', 'create_time', 'area', 'trend'],
            ['serial_number', 'area'], header=None)

    @staticmethod
    def check_file_is_exists(csv_name):
        """Remove *csv_name* if it exists (despite the name, this deletes)."""
        if os.path.isfile(csv_name):
            os.remove(csv_name)


if __name__ == '__main__':
    # One timestamp per run, stamped onto every estate-price row.
    time = timeutil.dt_strptime("%Y-%m-%d %H%M%S")
    city_dict = GlobalVar.city_env
    for item in city_dict.items():
        # Map internal city codes to the codes creprice.cn uses in its URLs.
        # NOTE: the table/file names below still use the INTERNAL code item[0];
        # only the scraper receives the remapped code.
        if item[0] == 'sz':
            new_item = ('su', item[1])
        elif item[0] == 'changzhou':
            new_item = ('cz', item[1])
        elif item[0] == 'hb':
            new_item = ('hebi', item[1])
        elif item[0] == 'zj':
            new_item = ('zhenjiang', item[1])
        else:
            new_item = item
        # Per-city DB table and CSV staging file names
        table_name = area_house_price_table_name.format(city=item[0])
        file_name = area_house_price_file_name.format(city=item[0])
        area_estate_table_name = area_estate_house_price_table_name.format(city=item[0])
        area_estate_file_name = area_estate_house_price_file_name.format(city=item[0])

        # Remove stale CSVs left over from a previous run
        fileutil.del_file(file_name)
        fileutil.del_file(area_estate_file_name)

        # Scrape: district ranking first, then per-district estate rankings
        NationalHousePricePage().get_area_house_price(new_item, fileutil.set_file_path(file_name), table_name)
        city_name, area_dict = NationalHousePricePage().get_area_estate_house_link(new_item)  # per-district URLs
        NationalHousePricePage().get_area_estate_house_price(city_name, area_dict, time,
                                                             fileutil.set_file_path(area_estate_file_name),
                                                             area_estate_table_name)

        # Load the staged CSVs into the database
        NationalHousePricePage().insert_area_house_price_data(fileutil.set_file_path(file_name), table_name)
        NationalHousePricePage().insert_estate_house_price_data(fileutil.set_file_path(area_estate_file_name),
                                                                area_estate_table_name)
