
import sys
import time
import random
import re

from Config import *
from SpiderUtil import *
from ProxyUtil import *
from RabbitUtil import RabbitUtil
from SeleniumUtil import SeleniumUtil
from SeleniumUtil import CheckDriverExit
from TimeUtil    import *

#selenium
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
# Anti-bot: undetected_chromedriver evades Selenium detection
import undetected_chromedriver as uc
#BeautifulSoup
from bs4 import BeautifulSoup

from pymysql.converters import escape_string

from ErrorCode import *

#多线程处理
from threading import Thread
#线程池相关
from concurrent.futures import ThreadPoolExecutor, wait, as_completed


# Parse the signup/watch/reminder counters on the page into a dict
def ParsePersonInfos(soup):
    """Extract the counts from the <em> tag inside each of the first three
    <span> elements of the given node.

    Returns {'报名': ..., '围观': ..., '提醒': ...} (signup / watch / reminder).
    Raises IndexError when fewer than three spans (or an empty <em>) exist.

    NOTE(review): the original inline comments labeled the 2nd span 提醒 and
    the 3rd 围观 — the opposite of the dict keys used. The keys are kept
    exactly as before; confirm the actual span order on the page.
    """
    spans = soup.select('span')
    keys = ('报名', '围观', '提醒')
    return {key: spans[idx].select("em")[0].get_text()
            for idx, key in enumerate(keys)}

def ParsePersonInfos1(soup):
    """Extract counts from ideographic-space separated text such as
    '报名 3人\u3000围观 120次\u3000提醒 45次'.

    For each segment that mentions a label, the first run of digits in that
    segment becomes the value. Labels absent from the text are simply
    missing from the returned dict.
    """
    digits = re.compile(r'\d+')
    result = {}
    for segment in soup.text.split("\u3000"):
        for label in ('报名', '围观', '提醒'):
            if label in segment:
                result[label] = digits.findall(segment)[0]
    return result

def ParsePersonInfos2(soup):
    """Extract the counts for the third page layout: same as
    ParsePersonInfos, but the counters live in the odd-numbered <span>
    children of the container.

    NOTE(review): as in ParsePersonInfos, the original comments labeled the
    2nd/3rd spans opposite to the keys used here — keys kept unchanged.
    """
    spans = soup.select('span:nth-of-type(odd)')
    keys = ('报名', '围观', '提醒')
    return {key: spans[idx].select("em")[0].get_text()
            for idx, key in enumerate(keys)}

# 获取一个详情页面的源码
def GetDetailPageSource(ItemID, driver, URL):
    """Load the detail page for ItemID with the Selenium driver and return
    its HTML source.

    Returns None when the browser ends up on a URL that does not end with
    ItemID (redirected away, e.g. by an anti-bot page). Retries forever on
    transient errors. Calls sys.exit(1) when the page title contains no
    Chinese characters — treated as a broken/blocked session.
    """
    while True:
        # Abort early if the browser process is gone (SeleniumUtil helper).
        CheckDriverExit(driver)

        try:
            driver.get(URL)
            # Give the page a moment to load.
            time.sleep(1)

            # Redirected somewhere else -> this is not the page we asked for.
            if not driver.current_url.endswith(f"{ItemID}"):
                return None

            # A title without Chinese means a block/login page; kill the
            # browser and this worker.
            if not CheckChinese(driver.title):
                driver.quit()
                sys.exit(1)

            return driver.page_source
        except Exception:
            # BUGFIX: was a bare `except:`, which also caught the SystemExit
            # raised by sys.exit(1) above, turning the intended shutdown
            # into an infinite retry loop.
            print("Detail Parse error! Try Again!!!")
            WarningPrompt(Type=EDetail["E202"])
            time.sleep(5)

# 详情页面解析
def GetDetailPageParse(HtmlContent, URL, ItemID):
    """Parse one auction detail page.

    HtmlContent -- raw page HTML string
    URL         -- page URL, stored as OutMap['auctionUrl']
    ItemID      -- numeric item id, stored as OutMap['nameMd5']

    Returns (OutMap, ImgList): a dict of scraped fields and a list of image
    URLs. Raises KeyError when none of the known viewer-count containers is
    present (the caller treats any exception as a parse failure).
    """
    soup = BeautifulSoup(HtmlContent, "lxml")

    OutMap = {}    # scraped fields
    ImgList = []   # image URLs
    # BUGFIX: both of these were unbound (NameError) when none of the
    # layout selectors below matched.
    PersonMap = {}
    ImgItems = []

    soupHead = soup

    # Viewer/subscriber/participant counters — three known page layouts.
    if len(soupHead.select('div.times')) > 0:
        PersonMap = ParsePersonInfos(soupHead.select('div.times')[0])
    if len(soupHead.select('div.endtime')) > 0:
        PersonMap = ParsePersonInfos1(soupHead.select('div.endtime')[0])
    if len(soupHead.select('div.pm-people')) > 0:
        PersonMap = ParsePersonInfos2(soupHead.select('div.pm-people')[0])

    # Thumbnail images, rewritten to the large-size CDN variant.
    if soup.select('div.pm-preview-wrap'):
        ImgItems = soup.select('div.pm-preview-wrap')[0].select('div.list ul li img')
    if soup.select('div.spec-items'):
        ImgItems = soup.select('div.spec-items')[0].select('ul li img')
    ImgList.extend(['https:' + Img['src'].replace('s50x50', 's350x350').replace("n5", "imgb") for Img in ImgItems])

    # 'auction_url' — the auction page URL.
    OutMap['auctionUrl'] = URL

    # Auction stage, e.g. 【二拍】, taken from the title block.
    TitleElem = None
    if soupHead.select('div.pm-name'):
        TitleElem = soupHead.select('div.pm-name')
    if soupHead.select('div.title-container'):
        TitleElem = soupHead.select('div.title-container')
    if TitleElem:
        stateResult = re.search(r"【(.*?)】", TitleElem[0].text)
        if stateResult:
            OutMap['stage'] = stateResult.group(1)

    # Counters — raises KeyError when PersonMap is incomplete, which the
    # caller treats as a parse failure.
    OutMap['browseCount'] = PersonMap['围观']
    OutMap['subscriberCount'] = PersonMap['提醒']
    OutMap['participantCount'] = PersonMap['报名']

    # Bid increment, e.g. "加价幅度 ￥1,000" (both yen-sign variants handled).
    DescLis = soupHead.select('div.description ul li')
    if not DescLis:
        DescLis = soupHead.select('div.pm-attachment ul li')
    if DescLis:
        for item in DescLis:
            if '加价幅度' in item.text:
                match = re.search(r"￥(\d+(?:,\d+)*)", item.text.replace("¥", "￥"))
                if match:
                    OutMap['increasePriceYuan'] = match.group(1).replace(",", "")
                break

    # Built-up area: try increasingly loose keywords, then bare unit
    # suffixes. A space is injected after the keyword so the pattern's `.`
    # consumes it before the non-digit run.
    # BUGFIX: the patterns were plain (non-raw) strings containing \D/\d,
    # which triggers invalid-escape SyntaxWarnings on modern Python; they
    # are now genuine raw strings with identical runtime behavior.
    AllText = soup.get_text()
    MianJi = []
    for Keyword in ('房屋建筑面积', '建筑面积', '面积'):
        MianJi = re.findall(Keyword + r'.\D{0,20}(\d+\.?\d*)', AllText.replace(Keyword, Keyword + ' '))
        if MianJi:
            break
    if len(MianJi) == 0:
        MianJi = re.findall(r'(\d+\.?\d*)\s*㎡', AllText)
    if len(MianJi) == 0:
        MianJi = re.findall(r'(\d+\.?\d*)\s*平方米', AllText)

    if len(MianJi):
        # Keep the largest plausible candidate (1 < area < 10000 sqm);
        # stays -1 when no candidate is plausible, matching the original.
        MianjiTemp = -1
        for item in MianJi:
            if float(item) > 1 and float(item) < 10000:
                if float(item) > MianjiTemp:
                    MianjiTemp = float(item)
        OutMap['builtUpAreaSquareCentimeter'] = MianjiTemp

    # Data source tag.
    OutMap['origin'] = "京东法拍"

    # Description / announcement / notice blocks — two known layouts:
    # a floors <ul> with titled sections, or fixed element ids.
    if soup.select('ul.floors'):
        FloorsLis = soup.select('ul.floors')[0].find_all('li')
        for itemLi in FloorsLis:
            if itemLi.select('h3.floor-title'):
                floorTitle = itemLi.select('h3.floor-title')[0].text
                if floorTitle.startswith('标的物详情'):
                    OutMap['description'] = str(itemLi)
                if floorTitle.startswith('竞买公告'):
                    OutMap['announcement'] = str(itemLi)
                if floorTitle.startswith('竞买须知'):
                    OutMap['notice'] = str(itemLi)
    else:
        for key, elemId in (('description', 'addition-desc'),
                            ('announcement', 'bid-announce'),
                            ('notice', 'bid-notice')):
            elem = soup.find(id=elemId)
            if elem:
                OutMap[key] = str(elem)

    OutMap['nameMd5'] = ItemID

    # The first image doubles as the banner.
    if len(ImgList):
        OutMap['banner'] = ImgList[0]
    return (OutMap, ImgList)
    

#单个拍卖资产页解析
def SFOneParse(driver, URL):
    """Fetch and parse a single auction asset page.

    Returns (info-dict, image-list) on success, (None, None) on any
    failure (the error is logged and reported via WarningPrompt).
    """
    try:
        # The numeric item id is the last path segment of the URL.
        ItemID = re.findall(r'/(\d+)$', URL)[0]
        HtmlContent = GetDetailPageSource(ItemID, driver, URL)
        if HtmlContent:
            return GetDetailPageParse(HtmlContent, URL, ItemID)
    except Exception as e:
        print("Parse html error! Try Again!", URL, e)
        WarningPrompt(Type=EDetail["E203"])
        # Make sure the browser is still alive before the caller retries.
        CheckDriverExit(driver)
        time.sleep(5)
    return (None, None)


# 详情消费者
# 详情消费者
def DetailConsumer(AuctionUrl, driver, SeleniumH):
    """Consume one auction URL: scrape the detail page and persist it.

    AuctionUrl is '<page-url>' or '<page-url>?<spiderHouseId>'. Returns 0
    on a normally completed run, None when an unexpected exception was
    caught and logged. Calls sys.exit(1) when the browser has been closed,
    so the pool replaces this worker.
    """
    try:
        print("Consumer AuctionUrl=", AuctionUrl)
        # Browser window closed -> end this worker task.
        if SeleniumH.Check() == False:
            SeleniumH.Quit()
            sys.exit(1)

        # Split off the optional ?spiderHouseId suffix.
        UAT = AuctionUrl.split("?")

        # Scrape one property page.
        (SFItemMap, ImgList) = SFOneParse(driver, UAT[0])

        # The scraper is alive -> mark spider status as running.
        StatusModify(Status=3)

        if SFItemMap != None:
            # Attach every image as a banner-category storage record.
            storageList = []
            for Img in ImgList:
                storageList.append({"filename": Img, "category": "banner", "relatedObjectId": SFItemMap["nameMd5"]})
            if len(UAT) > 1:
                SFItemMap["spiderHouseId"] = UAT[1]
            SFItemMap["storageRelationships"] = storageList
            # Persist the item and its image relationships.
            PageDetailSave(SFItemMap)
        else:
            # BUGFIX: UAT[1] raised IndexError when the URL carried no
            # '?id' suffix (the success path already guards len(UAT) > 1);
            # only report the failure when the id is actually present.
            if len(UAT) > 1:
                PageDetailFail(UAT[1])
        return 0
    except Exception as e:
        print("queue QueueDetail except info ", e)


# 详情解析线程
# 详情解析线程
def DetailParse(proxyUrl, serverIp):
    """Worker thread: consume auction-detail URLs from a per-server
    RabbitMQ queue and scrape each one with a dedicated Selenium browser.

    proxyUrl -- proxy endpoint handed to PorxyInfo for the browser
    serverIp -- this server's IP; dots become underscores in the queue name
    """
    # Connect to RabbitMQ with the configured credentials.
    RabbitParam = Config["RabbitMQ"]
    RabbitH = RabbitUtil(host=RabbitParam["host"], user=RabbitParam["user"], passwd=RabbitParam["passwd"], vhost=RabbitParam["vhost"])

    # Start a browser behind the proxy.
    SeleniumH = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl))
    driver = SeleniumH.GetDriver()

    # Give the browser time to start up.
    time.sleep(2)

    def DetailQueue(AuctionUrl):
        # Queue payloads are bytes; decode before handing to the consumer.
        return DetailConsumer(AuctionUrl.decode('utf-8'), driver, SeleniumH)

    # One queue per server, e.g. "<jddqName>1_2_3_4".
    QueueName = Config["Spider"]["jddqName"] + serverIp.replace(".", "_")
    RabbitH.CreateQueues([QueueName])
    # Block here consuming detail URLs until the consumer loop ends.
    RabbitH.RegistConsumer(QueueName, DetailQueue)

    # Release resources.
    RabbitH.Close()
    SeleniumH.Quit()


# 执行详情信息获取
# Entry point: fan detail scraping out over a thread pool.
if __name__ == "__main__":
    # Spider parameters (thread count, proxy, server ip) from the backend.
    ParaMap = GetSpiderParams()
    threadNum = ParaMap["threadNum"]

    # threadNum concurrent workers, but threadNum*20 queued tasks: a task
    # that dies (e.g. browser closed -> sys.exit) is replaced by the next
    # queued one.
    with ThreadPoolExecutor(max_workers=threadNum) as pool:
        pending = [pool.submit(DetailParse, ParaMap['proxyUrlJd'], ParaMap['serverIp'])
                   for _ in range(threadNum * 20)]
        for future in as_completed(pending):
            data = future.result()
            print("in main: get page {}s success".format(data))

    sys.exit(0)
