
import sys
import time
import random
import re

from Config import *
from SpiderUtil import *
from ProxyUtil import *
from RabbitUtil import RabbitUtil
from SeleniumUtil import SeleniumUtil
from SeleniumUtil import CheckDriverExit
from TimeUtil    import *

#selenium
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
# anti-bot evasion
import undetected_chromedriver as uc
#BeautifulSoup
from bs4 import BeautifulSoup

from pymysql.converters import escape_string

from ErrorCode import *

# multithreading support
from threading import Thread
# thread-pool utilities
from concurrent.futures import ThreadPoolExecutor, wait, as_completed

# Fetch the raw HTML source of one auction detail page.
def GetDetailPageSource(ItemID, driver, URL):
    """Open URL in the given Selenium driver and return its page source.

    Returns None when the browser ends up on a page whose URL no longer
    contains ItemID (e.g. redirected to a login wall) -- the caller treats
    that item as failed. Exits the whole process when the loaded title
    contains neither Chinese characters nor digits (blocked/broken session).
    Retries forever on transient navigation errors.
    """
    while True:
        # Abort early if the browser process has died.
        CheckDriverExit(driver)

        try:
            # Navigate to the detail page.
            driver.get(URL)

            # Give the page a moment to load.
            time.sleep(1)

            # Redirected away from the requested item -- small platforms no
            # longer need the login dance here; hand back as a failed item.
            if ItemID not in driver.current_url:
                return None

            # Sanity-check the load: a valid title contains Chinese text or
            # digits (some listings on this site have purely numeric titles).
            if not CheckChinese(driver.title) and not has_numbers(driver.title):
                driver.quit()
                sys.exit(1)

            return driver.page_source
        except Exception:
            # BUG FIX: a bare `except:` also caught the SystemExit raised by
            # sys.exit(1) above, turning the intended process exit into an
            # infinite retry loop. Catch Exception only.
            print("Detail Parse error! Try Again!!!")
            WarningPrompt(Type=EDetail["E202"])
            time.sleep(5)
    #end while True

# Parse one auction detail page into a field map plus its image list.
def GetDetailPageParse(driver, HtmlContent, URL, ItemID, MeetID):
    """Extract the auction fields and gallery images from a detail page.

    Args:
        driver: live Selenium driver, still positioned on the detail page
            (needed to click through the thumbnail gallery).
        HtmlContent: page source previously fetched via the driver.
        URL: the detail page URL.
        ItemID, MeetID: lot and meeting ids extracted from the URL.

    Returns:
        (OutMap, ImgList): dict of parsed fields and list of image URLs.
    """
    soup = BeautifulSoup(HtmlContent, "lxml")

    #-------------- fresh result containers --------------
    OutMap  = {}  # parsed field map
    ImgList = []  # collected image URLs

    soupHead = soup
    # Narrow to the main content container when present.
    if soup.select("div.pm-main"):
        soupHead = soup.select("div.pm-main")[0]

    # Viewer/watcher counts come from the listing page, not parsed here.

    # Gallery images: each thumbnail must be clicked so the full-size image
    # is loaded into img.showimg before its src can be read.
    ImgEles = driver.find_elements(By.CSS_SELECTOR, 'div#showsum p span')
    for ele in ImgEles:
        ele.click()
        time.sleep(0.3)  # wait for the large image to swap in
        ShowImg = driver.find_element(By.CSS_SELECTOR, 'img.showimg')
        if ShowImg:
            # NOTE(review): Selenium's get_attribute('src') normally returns
            # an absolute URL -- confirm this prefix is not duplicated.
            ImgList.append('https://paimai.caa123.org.cn'+ShowImg.get_attribute('src'))

    # auction URL
    OutMap['auctionUrl'] = URL

    # deposit (yuan); thousands separators stripped
    if soupHead.select('span#cashDeposit'):
        OutMap['depositYuan'] = soupHead.select('span#cashDeposit')[0].text.replace(",", "")
    # bid increment
    if soupHead.select('span#rateLadder'):
        OutMap['increasePriceYuan'] = soupHead.select('span#rateLadder')[0].text.replace(",", "")
    # appraised price ('无' marks "none")
    if soupHead.select('span#assessPrice'):
        if '无' not in soupHead.select('span#assessPrice')[0].text:
            OutMap['estimatePriceYuan'] = soupHead.select('span#assessPrice')[0].text.replace(",", "")
    # address
    if soupHead.select('div.location span'):
        OutMap['address'] = soupHead.select('div.location span')[0].text
    # floor area; strip the "平方米" (square metres) unit suffix
    if soupHead.select('span#lot_allnum'):
        OutMap['builtUpAreaSquareCentimeter'] = soupHead.select('span#lot_allnum')[0].text.replace("平方米", "")

    # Court name is available from the listing page; not parsed here.

    # Fallback: scrape the floor area out of the full page text when the
    # dedicated element was missing.
    if 'builtUpAreaSquareCentimeter' not in OutMap:
        AllText = soup.get_text()

        # Try progressively looser keywords; a space is inserted right after
        # the keyword so the leading wildcard has something to match.
        # BUG FIX: the number patterns were concatenated as non-raw strings
        # (invalid \D/\d escapes); they are raw literals now.
        MianJi = []
        for Keyword in ('房屋建筑面积', '建筑面积', '面积'):
            MianJi = re.findall(Keyword + r'.\D{0,20}(\d+\.?\d*)', AllText.replace(Keyword, Keyword + ' '))
            if MianJi:
                break
        if not MianJi:
            MianJi = re.findall(r'(\d+\.?\d*)\s*㎡', AllText)
        if not MianJi:
            MianJi = re.findall(r'(\d+\.?\d*)\s*平方米', AllText)

        # Keep the largest plausible candidate (1 < area < 10000 sq m).
        MianjiTemp = -1
        for item in MianJi:
            value = float(item)
            if 1 < value < 10000 and value > MianjiTemp:
                MianjiTemp = value
        # BUG FIX: previously -1 was stored when no candidate passed the
        # plausibility filter; only record a real match now.
        if MianjiTemp > 0:
            OutMap['builtUpAreaSquareCentimeter'] = MianjiTemp

    OutMap['origin'] = "中拍平台"

    # description (0) / announcement (1) / notice (2) sections, kept as HTML
    if soup.select('div#RemindTip'):
        OutMap['description'] = str(soup.select('div#RemindTip')[0])
    if soup.select('div#NoticeDetail'):
        OutMap['announcement'] = str(soup.select('div#NoticeDetail')[0])
    if soup.select('div#ItemNotice'):
        OutMap['notice'] = str(soup.select('div#ItemNotice')[0])

    # lotId+meetId concatenated, so the key cannot collide with other platforms
    OutMap['nameMd5'] = ItemID+MeetID

    # first gallery image doubles as the banner
    if ImgList:
        OutMap['banner'] = ImgList[0]
    return (OutMap, ImgList)
    

# Parse a single auction asset page.
def SFOneParse(driver, URL):
    """Fetch and parse one detail page.

    Returns (OutMap, ImgList) on success, (None, None) on any failure.
    """
    try:
        # Pull the lot and meeting ids out of the URL.
        lot_ids  = re.findall(r'lotId=(\d+)', URL)
        meet_ids = re.findall(r'meetId=(\d+)', URL)
        ItemID = lot_ids[0]
        MeetID = meet_ids[0]

        # Download the page source.
        HtmlContent = GetDetailPageSource(ItemID, driver, URL)

        # Hand successful downloads off to the HTML parser.
        if HtmlContent:
            return GetDetailPageParse(driver, HtmlContent, URL, ItemID, MeetID)
    except Exception as e:
        print("Parse html error! Try Again!", URL, e)
        WarningPrompt(Type=EDetail["E203"])
        # Make sure the browser is still alive before the caller moves on.
        CheckDriverExit(driver)
        time.sleep(5)
    return (None, None)


# Detail-page consumer: handles one auction URL pulled from RabbitMQ.
def DetailConsumer(AuctionUrl, driver, SeleniumH):
    """Parse one auction detail URL and persist the result.

    AuctionUrl carries the internal record id appended as "?<spiderHouseId>";
    that suffix is stripped off before the page is fetched. Returns 0 when
    the message was handled (saved or recorded as failed), None otherwise.
    """
    try:
        print("Consumer AuctionUrl=", AuctionUrl)
        # If the browser has been closed, end this worker task entirely
        # (SystemExit is not caught by the Exception handler below).
        if SeleniumH.Check()==False:
            SeleniumH.Quit()
            sys.exit(1)

        # Internal record id rides on the URL as a trailing "?<digits>".
        match = re.search(r"\?(\d+)$", AuctionUrl)
        if match is None:
            # BUG FIX: previously this raised AttributeError on URLs without
            # the suffix, silently swallowed by the broad except below.
            print("Consumer bad AuctionUrl, no spiderHouseId suffix:", AuctionUrl)
            return None
        spiderHouseId = match.group(1)

        # Parse the page with the id suffix stripped back off the URL.
        (SFItemMap,ImgList) = SFOneParse(driver, AuctionUrl.replace("?"+spiderHouseId, ""))

        # Scraping worked at all: flip the spider status to "running".
        StatusModify(Status=3)

        if SFItemMap is not None:
            # Attach every gallery image as a banner storage relationship.
            storageList = [
                {"filename": Img, "category": "banner", "relatedObjectId": SFItemMap["nameMd5"]}
                for Img in ImgList
            ]
            # NOTE(review): single-character ids are skipped here -- confirm
            # this length check is intentional.
            if len(spiderHouseId) > 1:
                SFItemMap["spiderHouseId"] = spiderHouseId
            SFItemMap["storageRelationships"] = storageList
            # Persist the parsed fields together with the image links.
            PageDetailSave(SFItemMap)
        else:
            # Parsing failed: record the failure for this record id.
            PageDetailFail(spiderHouseId)
        return 0
    except Exception as e:
        # Boundary catch: log and drop the message.
        print("queue QueueDetail except info ", e)


# Detail-parsing worker thread.
def DetailParse(proxyUrl, serverIp):
    """Spin up a browser plus a RabbitMQ consumer for this server's detail
    queue, process messages until the consumer loop ends, then release both
    resources."""
    # RabbitMQ connection built from config.
    mq_conf = Config["RabbitMQ"]
    RabbitH = RabbitUtil(host=mq_conf["host"], user=mq_conf["user"],
                         passwd=mq_conf["passwd"], vhost=mq_conf["vhost"])

    # Browser session behind the configured proxy.
    SeleniumH = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl))
    driver = SeleniumH.GetDriver()

    # Give the browser a moment to come up.
    time.sleep(2)

    # Per-message callback: decode the queue payload and process it.
    def DetailQueue(AuctionUrl):
        return DetailConsumer(AuctionUrl.decode('utf-8'), driver, SeleniumH)

    # Queue name is suffixed with the server ip; dots are replaced since
    # they are not wanted in the queue name.
    QueueName = Config["Spider"]["zpptdqName"] + serverIp.replace(".", "_")
    RabbitH.CreateQueues([QueueName])
    # Blocks here while consuming detail URLs from RabbitMQ.
    RabbitH.RegistConsumer(QueueName, DetailQueue)

    # Release resources once the consumer returns.
    RabbitH.Close()
    SeleniumH.Quit()
    #end
    #end


# Entry point: run the detail scrapers on a thread pool.
if __name__ == "__main__":
    # Spider parameters (thread count, proxy, server ip).
    ParaMap = GetSpiderParams()
    threadNum = ParaMap["threadNum"]

    with ThreadPoolExecutor(max_workers=threadNum) as pool:
        # Queue far more tasks than workers: if a worker dies mid-run
        # (browser closed -> sys.exit), a queued task takes its place.
        pending = [
            pool.submit(DetailParse, ParaMap['proxyUrlZppt'], ParaMap['serverIp'])
            for _ in range(threadNum*20)
        ]
        # Drain results as the tasks complete.
        for future in as_completed(pending):
            print("in main: get page {}s success".format(future.result()))

    sys.exit(0)
