
import sys
import time
import random
import re

from Config import *
from SpiderUtil import *
from ProxyUtil import *
from RabbitUtil import RabbitUtil
from SeleniumUtil import SeleniumUtil
from SeleniumUtil import CheckDriverExit
from TimeUtil    import *

#selenium
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
#反爬
import undetected_chromedriver as uc
#BeautifulSoup
from bs4 import BeautifulSoup

from pymysql.converters import escape_string

from ErrorCode import *

#多线程处理
from threading import Thread
#线程池相关
from concurrent.futures import ThreadPoolExecutor, wait, as_completed


# Parse the person-info element into a dict of counts.
def ParsePersonInfos(soup):
    """Parse viewer/applicant/reminder counts from the person-info element.

    soup: a BeautifulSoup tag (or any object with a ``.text`` attribute)
    whose text looks like "围观 100 | 报名 5 | 提醒 10".

    Returns a dict with string-digit values under the keys '围观' (views),
    '报名' (applicants) and '提醒' (reminders).  A key is absent when the
    text does not split into exactly three "|"-separated parts or when a
    part carries no digits.
    """
    PersonMap = {}
    text = getattr(soup, "text", None)
    if text:
        perArr = text.split("|")
        if len(perArr) == 3:
            # Field order on the page is fixed: views | applicants | reminders.
            for key, item in zip(('围观', '报名', '提醒'), perArr):
                digits = re.findall(r"\d+", item)
                # Guard: the original indexed [0] unconditionally and raised
                # IndexError whenever a segment contained no digits.
                if digits:
                    PersonMap[key] = digits[0]
    return PersonMap

# Fetch the page source of one detail page.
def GetDetailPageSource(ItemID, driver, URL):
    """Fetch the detail-page source for a single auction item.

    ItemID: numeric id string the final URL must end with.
    driver: selenium webdriver instance.
    URL:    detail-page address to open.

    Retries forever on transient failures; terminates the process via
    sys.exit(1) when the page title looks invalid (neither Chinese text
    nor digits).  Returns ``driver.page_source`` on success.
    """
    # Loop until a page is fetched successfully.
    while True:
        # Make sure the browser is still alive before each attempt.
        CheckDriverExit(driver)

        try:
            # Open the detail page.
            driver.get(URL)

            # Give the page a moment to load.
            time.sleep(1)

            # Wait until the browser actually lands on the requested item.
            while not driver.current_url.endswith(f"{ItemID}"):
                WarningPrompt(Type=EDetail["E201"])
                time.sleep(5)

            # Sanity-check the load: this site sometimes serves purely
            # numeric titles, so accept either Chinese characters or digits.
            if not CheckChinese(driver.title) and not has_numbers(driver.title):
                # Broken page: shut the browser down and end this worker.
                driver.quit()
                sys.exit(1)

            return driver.page_source
        except Exception:
            # BUG FIX: this used to be a bare ``except:``, which also caught
            # the SystemExit raised by sys.exit(1) above and looped forever
            # instead of terminating the worker.
            print("Detail Parse error! Try Again!!!")
            WarningPrompt(Type=EDetail["E202"])
            time.sleep(5)
            continue
    #end while True

def _YuanToInt(Value):
    """Convert a price string such as '￥12345.6' to an integer yuan amount."""
    return int(float(Value.replace('￥', '')))


# Parse a detail page into a field dict plus an image list.
def GetDetailPageParse(HtmlContent, URL, ItemID):
    """Parse one auction detail page.

    HtmlContent: raw page HTML.
    URL:         the detail-page address (stored as ``auctionUrl``).
    ItemID:      site item id (stored as ``nameMd5``).

    Returns ``(OutMap, ImgList)`` where OutMap maps output field names to
    values and ImgList holds the image src URLs found on the page.
    """
    soup = BeautifulSoup(HtmlContent, "lxml")

    #--------------parse targets--------------
    OutMap  = {}  # parsed fields
    ImgList = []  # image URLs

    # Header area; fall back to the whole document when the wrapper is missing.
    soupHead = soup
    if soup.select(".details-main"):
        soupHead = soup.select(".details-main")[0]

    # Viewer / applicant / reminder counts.
    # BUG FIX: PersonMap used to be assigned only when the selector matched,
    # which raised NameError further down on pages without the element.
    PersonMap = {}
    if len(soupHead.select('div.wgnoClass')) > 0:
        PersonMap = ParsePersonInfos(soupHead.select('div.wgnoClass')[0])

    # Related images.
    ImgItems = None
    if soupHead.select('div.swiper-wrapper'):
        ImgItems = soupHead.select('div.swiper-wrapper')[0].select('div.pic-item span img')

    if ImgItems and len(ImgItems) > 0:
        ImgList.extend([Img['src'] for Img in ImgItems])

    #'auction_url'
    OutMap['auctionUrl'] = URL

    # Counts default to '0' when the page did not expose them
    # (previously a missing key raised KeyError here).
    OutMap['browseCount'] = PersonMap.get('围观', '0')       # views
    OutMap['subscriberCount'] = PersonMap.get('提醒', '0')   # reminders
    OutMap['participantCount'] = PersonMap.get('报名', '0')  # applicants

    # Key/value info rows (starting price, deposit, appraisal price, ...).
    DescLis = soupHead.select('div.action_infos div.action_infos_item')
    if DescLis:
        for actionItem in DescLis:
            InfoLis = actionItem.select('p.infos_item_detail')
            if InfoLis:
                for infoItem in InfoLis:
                    InfoTitle = infoItem.select('span.item_detail_title')[0].text.strip()
                    InfoValue = infoItem.select('span.item_detail_value')[0].text.strip()

                    if InfoTitle.startswith('起拍价'):  # starting price
                        OutMap['priceYuan'] = _YuanToInt(InfoValue)
                        # Deal/market price start out equal to the starting
                        # price and are overwritten below when a real deal
                        # or current price is found on the page.
                        OutMap['dealPrice'] = _YuanToInt(InfoValue)
                        OutMap['marketPriceYuan'] = _YuanToInt(InfoValue)
                    if InfoTitle.startswith('保证金'):  # deposit
                        OutMap['depositYuan'] = _YuanToInt(InfoValue)
                    if InfoTitle.startswith('评估价'):  # appraisal price
                        if '￥' in InfoValue:
                            OutMap['estimatePriceYuan'] = _YuanToInt(InfoValue)
                    if InfoTitle.startswith('加价幅度'):  # bid increment
                        OutMap['increasePriceYuan'] = _YuanToInt(InfoValue)
                    if InfoTitle.startswith('数量单位'):  # quantity / unit
                        # TODO the value may be a lot count such as "1套"
                        # instead of an area in square meters.
                        if '平方米' in InfoValue:
                            OutMap['builtUpAreaSquareCentimeter'] = InfoValue.replace('平方米', '')
                    if InfoTitle.startswith('拍卖次数'):  # auction round
                        if InfoValue.startswith('第一次'):
                            OutMap['stage'] = '一拍'
                        elif InfoValue.startswith('第二次') or InfoValue.startswith('重新拍卖'):
                            OutMap['stage'] = '二拍'
                        else:
                            OutMap['stage'] = InfoValue

    # Discount rate = market price / appraisal price, scaled by 10.
    if 'estimatePriceYuan' in OutMap and 'marketPriceYuan' in OutMap and float(OutMap['estimatePriceYuan']) > 10000:
        OutMap['discountRate'] = round(float(OutMap['marketPriceYuan']) * 10 / float(OutMap['estimatePriceYuan']), 2)

    # Deal price / current price; market price follows whichever is present.
    MainDetail = soupHead.select('div.main_detail')
    if MainDetail:
        match1 = re.search(r"成交价 ￥ (\d+)", MainDetail[0].text)  # deal price
        if match1:
            OutMap['dealPrice'] = int(float(match1.group(1)))
            OutMap['marketPriceYuan'] = int(float(match1.group(1)))
        # NOTE(review): unverified whether in-progress auctions expose a
        # current price ("当前价") in this element — confirm with live data.
        match2 = re.search(r"当前价 ￥ (\d+)", MainDetail[0].text)
        if match2:
            OutMap['dealPrice'] = int(float(match2.group(1)))
            OutMap['marketPriceYuan'] = int(float(match2.group(1)))

    # Fall back to mining the building area out of the whole page text.
    if 'builtUpAreaSquareCentimeter' not in OutMap:
        AllText = soup.get_text()

        # Raw-string suffix; the previous non-raw '.\D{0,20}(\d+\.?\d*)'
        # literals carried invalid escape sequences.
        Suffix = r'.\D{0,20}(\d+\.?\d*)'
        MianJi = []
        # Try the most specific keyword first.
        for Keyword in ('房屋建筑面积', '建筑面积', '面积'):
            MianJi = re.findall(Keyword + Suffix, AllText.replace(Keyword, Keyword + ' '))
            if MianJi:
                break
        if len(MianJi) == 0:
            MianJi = re.findall(r'(\d+\.?\d*)\s*㎡', AllText)
        if len(MianJi) == 0:
            MianJi = re.findall(r'(\d+\.?\d*)\s*平方米', AllText)

        if len(MianJi):
            # Keep the largest plausible candidate (1 .. 10000 m²).
            MianjiTemp = -1
            for item in MianJi:
                if float(item) > 1 and float(item) < 10000:
                    if float(item) > MianjiTemp:
                        MianjiTemp = float(item)
            # NOTE(review): stays -1 when every candidate is implausible —
            # behavior preserved from the original; confirm downstream copes.
            OutMap['builtUpAreaSquareCentimeter'] = MianjiTemp

    OutMap['origin'] = "公拍网"

    # Rich-text sections: item 1 = description, 2 = announcement, 3 = notice.
    PanelCon = soup.select('div.panel-con')
    if PanelCon:
        if PanelCon[0].select('div.list-item-1'):
            OutMap['description'] = str(PanelCon[0].select('div.list-item-1')[0])
            # Extract the house type from a "房屋类型：...；" phrase.
            htmatch = re.search(r"房屋类型：(.*?)；", PanelCon[0].select('div.list-item-1')[0].text)
            if htmatch:
                OutMap['houseType'] = htmatch.group(1) + '用房'
        if PanelCon[0].select('div.list-item-2'):
            OutMap['announcement'] = str(PanelCon[0].select('div.list-item-2')[0])
        if PanelCon[0].select('div.list-item-3'):
            OutMap['notice'] = str(PanelCon[0].select('div.list-item-3')[0])

    OutMap['nameMd5'] = ItemID

    # The first image doubles as the banner.
    if len(ImgList):
        OutMap['banner'] = ImgList[0]
    return (OutMap, ImgList)
    

# Parse a single auction asset page.
def SFOneParse(driver, URL):
    """Fetch and parse one detail page.

    Returns ``(OutMap, ImgList)`` on success, ``(None, None)`` on failure.
    """
    try:
        # Pull the asset id out of the URL query string.
        ItemID = re.findall(r'id=(\d+)', URL)[0]
        # Download the page source, then hand it straight to the parser.
        return GetDetailPageParse(GetDetailPageSource(ItemID, driver, URL), URL, ItemID)
    except Exception as e:
        print("Parse html error! Try Again!", URL, e)
        WarningPrompt(Type=EDetail["E203"])
        # Verify the browser is still usable before giving up on this page.
        CheckDriverExit(driver)
        time.sleep(5)
    return (None, None)


# Detail-page consumer: parses one auction URL and persists the result.
def DetailConsumer(AuctionUrl, driver, SeleniumH):
    """Consume one queue message.

    AuctionUrl: detail URL with the spider house id appended as "?<digits>".
    driver:     selenium webdriver shared by this worker thread.
    SeleniumH:  SeleniumUtil wrapper used to health-check / quit the browser.

    Returns 0 when the message was handled; returns None when an error was
    logged and swallowed.  Calls sys.exit(1) when the browser is gone.
    """
    try:
        print("Consumer AuctionUrl=", AuctionUrl)
        # If the browser has been closed, end this worker task outright
        # (SystemExit is not caught by the except Exception below).
        if SeleniumH.Check() == False:
            SeleniumH.Quit()
            sys.exit(1)

        # The spider house id is the trailing "?<digits>" on the URL.
        match = re.search(r"\?(\d+)$", AuctionUrl)
        # BUG FIX: match.group(1) used to raise AttributeError on URLs
        # without the suffix, silently dropping the message via the broad
        # except below; fail it explicitly with a clear log line instead.
        if match is None:
            print("Consumer AuctionUrl missing ?id suffix, skipped:", AuctionUrl)
            return None
        spiderHouseId = match.group(1)

        # Parse the asset page with the suffix stripped off.
        (SFItemMap, ImgList) = SFOneParse(driver, AuctionUrl.replace("?" + spiderHouseId, ""))

        # Scraping works: flip the spider status to "running".
        StatusModify(Status=3)

        # Parse succeeded: call the save API.
        if SFItemMap != None:
            # Build the image/storage relationship records.
            storageList = []
            for Img in ImgList:
                storageList.append({"filename": Img, "category": "banner", "relatedObjectId": SFItemMap["nameMd5"]})
            # NOTE(review): single-digit ids are skipped here — confirm intended.
            if len(spiderHouseId) > 1:
                SFItemMap["spiderHouseId"] = spiderHouseId
            SFItemMap["storageRelationships"] = storageList
            # Persist the images and the base info.
            PageDetailSave(SFItemMap)
        else:
            # Parse failed: mark this house id as failed.
            PageDetailFail(spiderHouseId)
        return 0
    except Exception as e:
        print("queue QueueDetail except info ", e)


# Worker thread: consume detail URLs from RabbitMQ and parse each one.
def DetailParse(proxyUrl, serverIp):
    """Run one detail-parsing worker bound to a per-server RabbitMQ queue."""
    # Read the MQ settings and connect.
    mqConf = Config["RabbitMQ"]
    RabbitH = RabbitUtil(host=mqConf["host"], user=mqConf["user"], passwd=mqConf["passwd"], vhost=mqConf["vhost"])

    # Bring up a proxied browser for this worker.
    SeleniumH = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl))
    driver = SeleniumH.GetDriver()

    # Give the browser time to start.
    time.sleep(2)

    # Queue callback: decode the raw message and hand it to the consumer.
    def DetailQueue(AuctionUrl):
        return DetailConsumer(AuctionUrl.decode('utf-8'), driver, SeleniumH)

    # One queue per server; dots in the IP become underscores.
    QueueName = Config["Spider"]["gpwdqName"] + serverIp.replace(".", "_")
    RabbitH.CreateQueues([QueueName])
    # Block here consuming detail URLs from RabbitMQ.
    RabbitH.RegistConsumer(QueueName, DetailQueue)

    # Release resources.
    RabbitH.Close()
    SeleniumH.Quit()
    #end


# Entry point: run the detail scrapers on a thread pool.
if __name__ == "__main__":
    # Spider parameters (thread count, proxy URL, server IP).
    params = GetSpiderParams()
    workers = params["threadNum"]

    with ThreadPoolExecutor(max_workers=workers) as pool:
        # Queue many tasks per worker: when a task dies (e.g. its browser
        # is closed), the pool immediately starts a replacement task.
        pending = [
            pool.submit(DetailParse, params['proxyUrlGpw'], params['serverIp'])
            for _ in range(workers * 20)
        ]
        # Drain results as the tasks finish.
        for fut in as_completed(pending):
            print("in main: get page {}s success".format(fut.result()))

    sys.exit(0)
