#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""
@author: caijj
@version: V1.0
@file: zg_house_price_trend_page.py
@time: 2022/02/11
"""
import ast
import datetime
import os
import random
import re
import uuid
import pandas as pd
from bs4 import BeautifulSoup
# from fake_useragent import UserAgent
from lxml import etree
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from sqlalchemy import create_engine
from sqlalchemy.exc import ResourceClosedError
from common.globalvar import GlobalVar
from config.conf import cm
from tools import timeutil, fileutil
from tools.database_tool import DataBaseTool
from tools.decorator import update_flag
from tools.timeutil import sleep

# URL templates for the two data sources; {city} is the site's short city code.
root_url = 'https://{city}.fangjia.zhuge.com'  # zhuge.com: second-hand house prices
xf_root_ul = 'https://{city}.fang.anjuke.com/fangjia'  # anjuke.com: new-house prices
# Per-city target table names on SQL Server.
real_time_transaction_data_table_name = 'mega_{city}_real_time_transaction_data'
new_house_price_trend_table_name = 'mega_{city}_new_house_price_trend'
second_hand_house_price_trend_table_name = 'mega_{city}_second_hand_house_price_trend'
# Per-city local staging CSV file names the crawl appends to before bulk-load.
real_time_transaction_data_file_name = 'mega_{city}_real_time_transaction_data.csv'
new_real_time_transaction_data_file_name = 'mega_{city}_new_real_time_transaction_data.csv'
new_house_price_trend_file_name = 'mega_{city}_new_house_price_trend.csv'
second_hand_house_price_file_name = 'mega_{city}_second_hand_house_price_trend.csv'


class HousePriceTrendPage():
    """Scraper for per-city house-price data.

    Collects second-hand price trends from fangjia.zhuge.com and new-house
    price trends from fang.anjuke.com with headless browsers, appends the
    rows to local staging CSV files, then bulk-loads the CSVs into SQL
    Server tables.

    NOTE(review): the crawl methods retry via unbounded recursion on any
    exception; a persistent failure (site layout change, network outage)
    will recurse until Python's recursion limit is hit.
    """
    # def agent():
    #     ua = UserAgent(verify_ssl=False)
    #     headers = {'User-Agent': ua.random}
    #     return headers
    @staticmethod
    def firefox_enter_target_page(url='https://su.xinfang.zhuge.com/suzhoufangjiazoushi/'):  # enter the target page with Firefox
        """Launch a headless Firefox, load *url* and return the live driver."""
        firefox_options = Options()
        firefox_options.headless = True
        web_driver = webdriver.Firefox(options=firefox_options)
        web_driver.maximize_window()
        web_driver.get(url)
        return web_driver

    @staticmethod
    def chrome_enter_target_page(url='https://su.xinfang.zhuge.com/suzhoufangjiazoushi/'):  # enter the target page with Chrome
        """Launch a headless Chrome, load *url* and return the live driver."""
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        web_driver = webdriver.Chrome(options=chrome_options)
        web_driver.maximize_window()
        web_driver.get(url)
        return web_driver

    @staticmethod
    def quit(web_driver):  # quit the browser
        """Quit *web_driver* (if one was created) and pause 5s to throttle requests."""
        if web_driver is not None:
            web_driver.quit()
            sleep(5)

    # @staticmethod
    # def insert_new_house_real_time_price_data(data, table_name):
    #     db = DataBaseTool()
    #     sql = "update {} set new_house_average_price=%s, new_on_month_growth=%s, new_trend=%s where area=%s". \
    #         format(table_name)
    #     try:
    #         cursor = db.conn.cursor()
    #         # cursor.executemany(sql, data)
    #         cursor.execute(sql, data)
    #         db.conn.commit()
    #     except Exception as e:
    #         print(e)
    #     db.conn.close()

    @staticmethod
    @update_flag()
    def insert_house_price_trend_data(csv_name, table_name):
        """Bulk-load a price-trend staging CSV into *table_name* on SQL Server.

        If the table already exists its rows are deleted first and the CSV
        data is appended, so the table always reflects the latest crawl.

        Raises:
            FileNotFoundError: when *csv_name* does not exist on disk.
        """
        # NOTE(review): DB credentials are hard-coded; move them to configuration.
        engine = create_engine('mssql+pymssql://base:base@192.168.106.21:1433/JRZF_BASE?charset=utf8')
        # Read the local CSV file
        if not os.path.exists(csv_name):
            raise FileNotFoundError("配置文件%s不存在！" % csv_name)
        else:
            df = pd.read_csv(csv_name, sep=',', header=None)
            df.columns = ['month', 'price', 'id', 'create_time', 'area', 'tag']
            # try:
            #     df.to_sql(table_name, engine, index=False, if_exists='fail')
            # except ValueError as e:
            #     self.del_data(table_name)
            #     df.to_sql(table_name, engine, index=False, if_exists='append')
            try:
                # if_exists='fail' raises ValueError when the table already exists.
                df.to_sql(table_name, engine, index=False, if_exists='fail')
            except ValueError as e:
                try:
                    # HACK: issues the DELETE through read_sql_query; a DELETE
                    # returns no result set, so this always raises
                    # ResourceClosedError, which is (ab)used to trigger the append.
                    pd.read_sql_query("delete from {}".format(table_name), con=engine)
                except ResourceClosedError as e:
                    df.to_sql(table_name, engine, index=False, if_exists='append')

    @staticmethod
    @update_flag()
    def insert_transaction_data(csv_name, table_name, new_csv_name):
        """Merge the second-hand and new-house real-time CSVs and load them.

        *csv_name* holds the second-hand rows (column names are read from the
        live table's schema); *new_csv_name* holds the matching new-house
        price / growth / trend columns, which are spliced in positionally
        before the combined frame is loaded into *table_name*
        (delete-then-append, same strategy as insert_house_price_trend_data).

        Raises:
            FileNotFoundError: when *csv_name* does not exist on disk.
        """
        # NOTE(review): DB credentials are hard-coded; move them to configuration.
        engine = create_engine('mssql+pymssql://base:base@192.168.106.21:1433/JRZF_BASE?charset=utf8')
        # Read the local CSV file
        if not os.path.exists(csv_name):
            raise FileNotFoundError("配置文件%s不存在！" % csv_name)
        else:
            # Column names come from the live table, so the CSV column order
            # must match the table definition.
            sql_result = DataBaseTool().get_column_name_sql(table_name)
            column_name_list = [column_item[0] for column_item in sql_result]
            df = pd.read_csv(csv_name, sep=',', header=None)
            new_df = pd.read_csv(new_csv_name, sep=',', header=None)
            new_df.columns = ['new_house_average_price', 'new_on_month_growth', 'new_trend']
            df.columns = column_name_list
            # Splice the new-house columns in by position; assumes both CSVs
            # list the districts in the same order -- TODO confirm.
            df['new_house_average_price'] = new_df['new_house_average_price'].values.tolist()
            df['new_on_month_growth'] = new_df['new_on_month_growth'].values.tolist()
            df['new_trend'] = new_df['new_trend'].values.tolist()
            df.drop_duplicates(subset=['area'], keep='first', inplace=True)
            try:
                # if_exists='fail' raises ValueError when the table already exists.
                df.to_sql(table_name, engine, index=False, if_exists='fail')
            except ValueError as e:
                try:
                    # HACK: DELETE via read_sql_query always raises
                    # ResourceClosedError (no result set); used to trigger append.
                    pd.read_sql_query("delete from {}".format(table_name), con=engine)
                except ResourceClosedError as e:
                    df.to_sql(table_name, engine, index=False, if_exists='append')

    @staticmethod
    def del_data(table_name='mega_wx_second_hand_house_price_trend'):
        """Delete all rows from *table_name* via the project DB helper."""
        db = DataBaseTool()
        sql = "delete from {}".format(table_name)
        db.update_sql(sql)

    @staticmethod
    def check_file_is_exists(csv_name):
        """Remove *csv_name* if present so a new crawl starts from a clean file."""
        if os.path.isfile(csv_name):
            os.remove(csv_name)

    def get_second_house_trend_link(self, city_info):
        """Resolve the zhuge.com second-hand trend URL for every district.

        Args:
            city_info: tuple of (site city code, list of district names).

        Returns:
            (city code, dict mapping the original district name to its page
            URL). The first district maps to the whole-city root page.
        """
        city_area_link = []
        city_area_list = city_info[1]
        try:
            web_driver = self.chrome_enter_target_page(root_url.format(city=city_info[0]))
            sleep(5)
            html_source = web_driver.page_source  # HTML source of the page currently loaded in the browser
            tree = etree.HTML(html_source)
            # District labels on the site differ from the configured names;
            # normalize them per city before looking up the links.
            if city_info[0] == 'su':
                new_city_area_list = ['园区' if area == '工业园区' else '新区' if area == '高新' else area for area in
                                      city_area_list]
            elif city_info[0] == 'wx':
                new_city_area_list = ['江阴' if area == '江阴市' else '宜兴' if area == '宜兴市' else area for area in
                                      city_area_list]
            elif city_info[0] == 'cz':
                new_city_area_list = [area[:-2] for area in city_area_list]
            elif city_info[0] == 'nj':
                new_city_area_list = ['雨花' if area == '雨花台' else area for area in city_area_list]
            elif city_info[0] == 'nt':
                new_city_area_list = ['开发区' if area == '南通经济技术开发区' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'nb':
                new_city_area_list = [area if area == '宁波' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'jh':
                new_city_area_list = [area if area == '金华' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'yz':
                new_city_area_list = [area if area == '扬州' else area[:-1] for area in city_area_list]
            # elif city_info[0] == 'zhenjiang':
            #     new_city_area_list = ['润州' if area == '润江' else area for area in city_area_list]
            elif city_info[0] == 'sx':
                new_city_area_list = [area if area == '绍兴' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'jx':
                new_city_area_list = [area if area == '嘉兴' else area[:-1] for area in city_area_list]
            else:
                new_city_area_list = city_area_list
            for city_area in new_city_area_list:
                if new_city_area_list.index(city_area) == 0:
                    # First entry is the city itself: use the root page.
                    area_link = root_url.format(city=city_info[0])
                else:
                    link = tree.xpath("//div[@class='l-filtrate-box']/a[contains(text(),'" + city_area + "')]/@href")[0]
                    area_link = root_url.format(city=city_info[0]) + link
                city_area_link.append(area_link)
            self.quit(web_driver)
            second_area_dict = dict(zip(city_area_list, city_area_link))
            return city_info[0], second_area_dict
        except Exception as e:
            print(e)
            print(('获取二手房走势link异常 + 地址：%s' % root_url.format(city=city_info[0])))
            sleep(20)
            # Retry by recursion -- unbounded; see the class docstring.
            # NOTE(review): the driver is not quit here, so a failure after the
            # browser started leaks a browser process.
            return self.get_second_house_trend_link(city_info)

    def get_second_hand_house_price_trend(self, city, second_area_dict, create_time, trend_file=None,
                                          transaction_file=None, trend_table_name=None,
                                          transaction_table_name=None, index=0):
        """Crawl each district page and append trend + real-time rows to CSVs.

        Args:
            city: site city code (unused in the body; kept for the interface).
            second_area_dict: district name -> page URL, from
                get_second_house_trend_link.
            create_time: crawl timestamp written into every row.
            trend_file: CSV path receiving the month/price trend rows.
            transaction_file: CSV path receiving the real-time summary rows.
            trend_table_name / transaction_table_name: unused here; only
                forwarded on the recursive retry.
            index: resume position within the district list (used on retry).

        NOTE(review): if an exception fires before the first loop iteration,
        'area' is unbound and the error print itself raises NameError.
        """
        web_driver = None
        area_list = list(second_area_dict.keys())
        try:
            for area in area_list[index:]:
                real_time_price_data = []
                # tag '0' marks the whole-city row, '1' a district row.
                get_tag = lambda: '0' if area_list.index(area) == 0 else '1'
                tag = get_tag()
                web_driver = self.chrome_enter_target_page(second_area_dict[area])
                sleep(2)
                html_source = web_driver.page_source  # HTML source of the page currently loaded in the browser
                tree = etree.HTML(html_source)
                if area == '鹤山区':
                    # This district page has no trend chart; skip the trend rows.
                    pass
                else:
                    # The chart data is embedded as a Python-literal dict in a
                    # hidden <input value="...">.
                    data = tree.xpath("//input[@class='fangjiaTrend']")[0].get('value')
                    data_dict = ast.literal_eval(data)
                    x_data = data_dict['fangjiaTrendX']
                    y_data = data_dict['fangjiaTrendY1']
                    dic = {"month": x_data,
                           "price": y_data}
                    df = pd.DataFrame(dic)
                    df['id'] = df.apply(lambda _: uuid.uuid4(), axis=1)
                    df['create_time'] = create_time
                    df['area'] = area
                    df['tag'] = tag
                    # Append to the staging CSV.
                    df.to_csv(trend_file, mode="a",
                              # columns=['id, month, price, create_time, area'],
                              encoding="utf-8",
                              header=False,
                              index=False)
                second_hand_house_average_price = \
                    tree.xpath("//div[contains(text(),'最新二手房均价')]//following-sibling::div["
                               "@class='average-price']/p")[0].text
                second_trend = \
                    tree.xpath("//div[contains(text(),'最新二手房均价')]//following-sibling::div//span[contains(text(),"
                               "'环比上月')]")[0].text
                second_percentage = \
                    tree.xpath("//div[contains(text(),'最新二手房均价')]//following-sibling::div//span[contains(text(),"
                               "'环比上月')]//following-sibling::span")[0].text
                # Encode the month-over-month direction: 1 up, 0 flat, -1 down.
                if second_trend[-2:] == '上升':
                    second_trend = 1
                elif second_trend[-2:] == '持平':
                    second_trend = 0
                else:
                    second_trend = -1
                # New-house fields are left blank here; they are filled from the
                # anjuke crawl when insert_transaction_data merges the CSVs.
                new_house_average_price, new_trend, new_percentage = '', '', ''
                self.quit(web_driver)
                # NOTE(review): the report month '2022-02' is hard-coded below.
                real_time_price_data.append(
                    (uuid.uuid4(), second_hand_house_average_price, new_house_average_price,
                     '环比' + second_percentage,
                     new_trend + new_percentage, '2022-02', create_time, area, second_trend, new_trend, tag))
                df = pd.DataFrame(data=real_time_price_data)
                # Append to the staging CSV.
                df.to_csv(transaction_file, mode="a",
                          encoding="utf-8",
                          header=False,
                          index=False)
                index += 1
        except Exception as e:
            print(e)
            self.quit(web_driver)
            print('未知异常%s' % (second_area_dict[area]))
            sleep(30)
            # Retry from the failed district (index was advanced per success).
            return self.get_second_hand_house_price_trend(city, second_area_dict, create_time, trend_file,
                                                          transaction_file, trend_table_name,
                                                          transaction_table_name, index)

    def get_new_house_trend_link(self, city_info):
        """Resolve the anjuke.com new-house trend URL for every district.

        Same contract as get_second_house_trend_link, against the anjuke
        fangjia pages.

        NOTE(review): if chrome_enter_target_page itself raises, 'web_driver'
        is unbound and self.quit(web_driver) in the except raises NameError.
        """
        city_area_link = []
        city_area_list = city_info[1]
        try:
            web_driver = self.chrome_enter_target_page(xf_root_ul.format(city=city_info[0]))
            sleep(3)
            html_source = web_driver.page_source  # HTML source of the page currently loaded in the browser
            # print(html_source)
            tree = etree.HTML(html_source)
            # Normalize configured district names to the site's labels per city.
            if city_info[0] == 'su':
                new_city_area_list = ['高新' if area == '高新区' else area for area in city_area_list]
            elif city_info[0] == 'wx':
                new_city_area_list = ['江阴' if area == '江阴市' else '宜兴' if area == '宜兴市' else area for area in
                                      city_area_list]
            elif city_info[0] == 'cz':
                new_city_area_list = [area[:-2] for area in city_area_list]
            elif city_info[0] == 'nt':
                new_city_area_list = ['开发区' if area == '南通经济技术开发区' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'huzhou':
                new_city_area_list = ['南浔' if area == '南浔区' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'nb':
                new_city_area_list = [area if area == '宁波' else area[:-1] for area in city_area_list]
            elif city_info[0] == 'jh':
                new_city_area_list = ['武义' if area == '武义县' else area for area in
                                      city_area_list]
            elif city_info[0] == 'yz':
                new_city_area_list = ['宝应市' if area == '宝应县' else '江都区' if area == '江都市' else area for area in
                                      city_area_list]
            # elif city_info[0] == 'zhenjiang':
            #     new_city_area_list = ['润州' if area == '润江' else area for area in city_area_list]
            elif city_info[0] == 'jx':
                new_city_area_list = [area if area == '嘉兴' else area[:-1] for area in city_area_list]
            else:
                new_city_area_list = city_area_list
            for city_area in new_city_area_list:
                if new_city_area_list.index(city_area) == 0:
                    # First entry is the city itself: use the root page.
                    area_link = xf_root_ul.format(city=city_info[0])
                elif city_area == '太仓' or city_area == '昆山':
                    # These two county-level cities are listed in a different panel.
                    area_link = tree.xpath("//div[@class='jia-con']/div[@class='jia-bd']/"
                                           "a[contains(text(),'" + city_area + "')]/@href")[0]
                else:
                    area_link = tree.xpath("//div[@class='filter-item']/a[contains(text(),'" + city_area + "')]/@href")[
                        0]
                city_area_link.append(area_link)
            # NOTE(review): quits directly instead of self.quit(), so no 5s pause here.
            web_driver.quit()
            new_area_dict = dict(zip(city_area_list, city_area_link))
            return city_info[0], new_area_dict
        except Exception as e:
            self.quit(web_driver)
            print('获取新房走势link异常 + 地址：%s' % xf_root_ul.format(city=city_info[0]))
            sleep(30)
            # Retry by recursion -- unbounded; see the class docstring.
            return self.get_new_house_trend_link(city_info)

    def get_new_house_price_trend(self, city, new_area_dict, create_time, trend_file=None, trend_table_name=None,
                                  new_transaction_file=None,
                                  transaction_table_name=None, index=0):
        """Crawl each anjuke district page for the new-house price trend.

        Appends month/price rows to *trend_file* and one price/growth/trend
        summary row per district to *new_transaction_file*. *city*,
        *trend_table_name* and *transaction_table_name* are unused in the
        body and only forwarded on the recursive retry. *index* is the resume
        position within the district list.
        """
        fixed_month = ()
        web_driver = None
        # pattern = re.compile(r"var xData = '(.*?)';$", re.MULTILINE | re.DOTALL)
        # scripts = soup.find("script", text=pattern)
        # x_pattern = re.findall(r"(?<=var xData = ')(.*?)(?=';)", str(scripts), re.MULTILINE | re.DOTALL)
        # y_pattern = re.findall(r"(?<=var yData = ')(.*?)(?=';)", str(scripts), re.MULTILINE | re.DOTALL)
        area_list = list(new_area_dict.keys())
        try:
            for area in area_list[index:]:
                # tag '0' marks the whole-city row, '1' a district row.
                get_tag = lambda: '0' if area_list.index(area) == 0 else '1'
                tag = get_tag()
                web_driver = self.chrome_enter_target_page(new_area_dict[area])
                sleep(3)
                html_source = web_driver.page_source  # HTML source of the page currently loaded in the browser
                tree = etree.HTML(html_source)
                soup = BeautifulSoup(html_source, "html.parser")
                # The chart data lives in an inline <script> containing "xdata:".
                pattern = re.compile(r"xdata:", re.MULTILINE | re.DOTALL)
                scripts = soup.find("script", text=pattern)
                if area == '常熟市' or area == '太仓市' or area == '张家港市' or area == '长宁':  # work around incorrect month labels in these pages' source
                    # Reuse the month axis captured from a previous district.
                    x_pattern = fixed_month
                else:
                    x_pattern = re.findall(r"(?<=xdata:\[)(.*?)(?=\])", str(scripts), re.MULTILINE | re.DOTALL)
                    x_pattern = x_pattern[0].encode('utf-8').decode("unicode_escape")
                    fixed_month = x_pattern
                x_data = ast.literal_eval(x_pattern)
                x_data = x_data[0:-1]  # drop the last entry (original comment said: keep the first 11 months)
                x_data = [x[:-1] for x in x_data]  # strip the trailing '月' (month) character
                # Prepend the month preceding the first one on the axis.
                if x_data[0] == '01':
                    new_month = '12'
                elif int(x_data[0]) > 10:
                    new_month = int(x_data[0]) - 1
                    new_month = str(new_month)
                else:
                    new_month = int(x_data[0]) - 1
                    new_month = '0' + str(new_month)
                x_data = [new_month] + x_data
                now_year = datetime.datetime.now().year
                x_data = [
                    str(now_year - 1) + '-' + x if x_data.index(x) <= x_data.index('12') else str(now_year) + '-' + x
                    for x in x_data]  # prefix each month with its year (months at/before '12' belong to last year)
                y_pattern = re.findall(r"(?<=ydata:)(.*?)(?=\}\);)", str(scripts), re.MULTILINE | re.DOTALL)
                y_pattern = y_pattern[0].encode('utf-8').decode("unicode_escape").strip()
                y_data = ast.literal_eval(y_pattern)[0]['data']
                y_data = y_data[0:-1]
                # Duplicate the first price to cover the prepended month.
                y_data = [y_data[0]] + y_data
                dic = {"month": x_data,
                       "price": y_data}
                df = pd.DataFrame(dic)
                df['id'] = df.apply(lambda _: uuid.uuid4(), axis=1)
                df['create_time'] = create_time
                df['area'] = area
                df['tag'] = tag
                # Append to the staging CSV.
                df.to_csv(trend_file, mode="a",
                          # columns=['id, month, price, create_time, area'],
                          encoding="utf-8",
                          header=False,
                          index=False)

                new_house_average_price = tree.xpath("//strong/span[contains(text(),'新房均价')]//following-sibling::em")[
                    0].text
                new_trend = tree.xpath("//span[contains(text(),'环比上月')]")[0].text
                # Encode direction (1 up, 0 flat, -1 down) and grab the percentage.
                if new_trend[-2:] == '上升':
                    new_trend = 1
                    new_percentage = tree.xpath("//span[contains(text(),'环比上月')]//following-sibling::em")[0].text[
                                     1:].strip()
                    new_percentage = '环比' + new_percentage
                elif new_trend[-2:] == '持平':
                    new_trend = 0
                    new_percentage = '持平'
                else:
                    new_trend = -1
                    new_percentage = tree.xpath("//span[contains(text(),'环比上月')]//following-sibling::em")[0].text[
                                     1:].strip()
                    new_percentage = '环比' + new_percentage
                df = pd.DataFrame({'new_house_average_price': [new_house_average_price],
                                   'new_on_month_growth': [new_percentage],
                                   'new_trend': [new_trend]
                                   })
                df.to_csv(new_transaction_file, mode="a",
                          # columns=['new_house_average_price, new_on_month_growth, new_trend'],
                          encoding="utf-8",
                          header=False,
                          index=False)
                index += 1
                self.quit(web_driver)
                sleep(random.randint(8, 10))
        except Exception as e:
            self.quit(web_driver)
            # print(e)
            # NOTE(review): 'area' is unbound here if the failure happened
            # before the first loop iteration.
            print('新房价格走势需手动验证 + 访问地址为%s' % (new_area_dict[area]))
            sleep(30)
            # Retry from the failed district (index was advanced per success).
            return self.get_new_house_price_trend(city, new_area_dict, create_time, trend_file, trend_table_name,
                                                  new_transaction_file,
                                                  transaction_table_name, index)


if __name__ == '__main__':
    # Configured city codes that differ from the codes the scraping sites use.
    _CODE_ALIASES = {'sz': 'su', 'changzhou': 'cz', 'hb': 'hebi', 'zj': 'zhenjiang'}
    for code, areas in GlobalVar.city_env.items():
        # Site-facing (code, districts) tuple; falls back to the configured code.
        site_item = (_CODE_ALIASES.get(code, code), areas)
        run_time = timeutil.dt_strptime("%Y-%m-%d %H%M%S")

        # Per-city staging CSV and target table names (keyed by the configured code).
        new_trend_csv = new_house_price_trend_file_name.format(city=code)
        second_trend_csv = second_hand_house_price_file_name.format(city=code)
        transaction_csv = real_time_transaction_data_file_name.format(city=code)
        new_transaction_csv = new_real_time_transaction_data_file_name.format(city=code)
        second_trend_table = second_hand_house_price_trend_table_name.format(city=code)
        transaction_table = real_time_transaction_data_table_name.format(city=code)
        new_trend_table = new_house_price_trend_table_name.format(city=code)

        # Remove stale staging files from a previous run.
        for stale_csv in (second_trend_csv, transaction_csv, new_trend_csv, new_transaction_csv):
            fileutil.del_file(stale_csv)

        # Crawl the data (the scraper is stateless, so one instance suffices).
        scraper = HousePriceTrendPage()
        city_name, area_dict = scraper.get_second_house_trend_link(site_item)
        scraper.get_second_hand_house_price_trend(city_name, area_dict, run_time,
                                                  fileutil.set_file_path(second_trend_csv),
                                                  fileutil.set_file_path(transaction_csv),
                                                  second_trend_table, transaction_table)

        city_name, area_dict = scraper.get_new_house_trend_link(site_item)
        scraper.get_new_house_price_trend(city_name, area_dict, run_time,
                                          fileutil.set_file_path(new_trend_csv),
                                          new_trend_table,
                                          fileutil.set_file_path(new_transaction_csv),
                                          transaction_table)

        # Load the staging CSVs into SQL Server.
        scraper.insert_house_price_trend_data(fileutil.set_file_path(second_trend_csv),
                                              second_trend_table)  # second-hand trend

        scraper.insert_house_price_trend_data(fileutil.set_file_path(new_trend_csv),
                                              new_trend_table)  # new-house trend

        scraper.insert_transaction_data(fileutil.set_file_path(transaction_csv),
                                        transaction_table,
                                        fileutil.set_file_path(new_transaction_csv))  # real-time data