
import sys
import time
import random
import re

from Config import *
from SpiderUtil import *
from ProxyUtil import *
from RabbitUtil import RabbitUtil
from SeleniumUtil import SeleniumUtil
from SeleniumUtil import CheckDriverExit


#selenium
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
# Anti-bot-detection browser driver
import undetected_chromedriver as uc
#BeautifulSoup
from bs4 import BeautifulSoup

from ErrorCode import *

# Multithreading
from threading import Thread
# Thread-pool helpers
from concurrent.futures import ThreadPoolExecutor, wait, as_completed

def StatusHandle(status):
    """Translate a raw Chinese auction-status string into a Status enum value.

    The keywords are checked in a fixed priority order (first hit wins),
    mirroring the original if/elif chain. A string matching none of the
    known keywords is returned unchanged.
    """
    # (keyword, enum member) pairs, scanned in priority order.
    status_keywords = (
        ('即将开始', Status.TODO),
        ('正在进行', Status.DOING),
        ('成交', Status.DONE),
        ('撤拍', Status.REVOCATION),
        ('流标', Status.FAILURE),
        ('悔拍', Status.REVOCATION),
        ('缓拍', Status.BREAK),
        ('中止', Status.STOP),
        ('取消', Status.REVOCATION),
    )
    for keyword, mapped in status_keywords:
        if keyword in status:
            return mapped.value
    # Unknown status text: pass through as-is.
    return status

#---------------------------------------- Page parsing: one list page holds up to 48 detail entries ----------------------------------------
# Parse the list of auctioned properties out of a search-result page (see "./html/SFList.html")
def SFListParse(soup):
    """Parse the auction-property entries out of one list page.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
        Parsed HTML of a gpai.net search-result page.

    Returns
    -------
    list[dict]
        One dict per auction item, shaped for PageListSave(). Items that
        fail to parse are skipped (best-effort scraping).
    """
    SFInfoList = []

    # The item id lives in the detail-page URL query string; compile once,
    # outside the per-item loop.
    item_id_pattern = re.compile(r'Web_Item_ID=(\d+)')

    result_list = soup.select('div.filt-result-list')
    if result_list:
        SFList = result_list[0].select("ul li")

        for SF in SFList:
            try:
                # Detail page link, e.g. //.../sf_item/712418425380.htm?track_id=...
                href = SF.select_one("a")['href']

                match = item_id_pattern.search(href)
                if match is None:
                    # Not an auction entry (no item id in the link): skip it
                    # instead of letting match.group(1) raise AttributeError.
                    continue
                code = match.group(1)

                # Listing title.
                title = SF.select_one('div.item-tit a').text.strip()

                # Status badge mapped through StatusHandle(); empty when absent.
                # NOTE: renamed from "Status" to avoid shadowing the imported
                # Status enum used elsewhere in this module.
                status_value = ''
                badge = SF.select('span.badge-icon')
                if badge:
                    status_value = StatusHandle(badge[0].text.strip())

                StartPrice = ''
                CurrentPrice = ''
                EstimatePrice = ''
                StartTime = ''
                EndTime = ''
                # Price/schedule rows look like "起拍价：123元" (full-width colon).
                for item in SF.select('div.gpai-infos p'):
                    text = item.text.strip()
                    if not text:
                        continue
                    ivArr = text.split("：")
                    if len(ivArr) != 2:
                        continue
                    label, value = ivArr
                    if '起拍价' in label:
                        StartPrice = AmountHandle(value.replace("元", ""))
                    if '成交价' in label:
                        CurrentPrice = AmountHandle(value.replace("元", ""))
                    if '评估价' in label:
                        EstimatePrice = AmountHandle(value.replace("元", ""))
                    if '开始时间' in label:
                        StartTime = DateHandle(value)
                    if '结束时间' in label or '预计结束' in label:
                        EndTime = DateHandle(value)

                SFInfoList.append({
                    "houseCode": "GPW_" + code,
                    "auctionUrl": "https:" + href,
                    "title": title,
                    "minPrice": StartPrice,
                    "currentPrice": CurrentPrice,
                    "assessmentPrice": EstimatePrice,
                    "startTime": StartTime,
                    "endTime": EndTime,
                    "houseType": '房产',
                    "status": status_value,
                    "origin": '公拍网',
                })
            except Exception as e:
                # Best-effort: log the broken item and keep going.
                print("list parse except info ", e)

    return SFInfoList



# Crawl the full paginated asset list and persist every page.
def GetSFList(CurrPage, driver):
    """Fetch every page of the gpai.net auction list and persist each one.

    Parameters
    ----------
    CurrPage : str | int | None
        Page to resume from; earlier pages are skipped. None restarts
        from the first page.
    driver :
        An already-open selenium webdriver session.

    Side effects: each parsed page is saved via PageListSave(), the job
    status is set to in-progress via StatusModify(Status=3), and failures
    are reported through WarningPrompt().
    """
    try:
        CheckDriverExit(driver)

        AliSFURL = "https://s.gpai.net/sf/search.do?at=376&Page=";
        # Open the first list page (no page number appended).
        driver.get(AliSFURL);
        time.sleep(1);

        # Total page count is embedded in the "span.page-infos" text; the
        # first number found is the page count.
        TotalPage = None;
        soup = BeautifulSoup(driver.page_source, "lxml");
        if soup.select('span.page-infos'):
            result = re.findall(r"\d+", soup.select('span.page-infos')[0].text)
            if result:
                TotalPage = result[0]

        SFList = SFListParse(soup)
        if SFList:
            # Persist page 1 and flip the crawl job to "in progress".
            PageListSave({"grabPage": str(1), "spiderHouses": SFList});
            StatusModify(Status=3)

        if CurrPage is None:
            CurrPage = 0;
        if TotalPage:
            # Pages 2..TotalPage; when resuming, skip pages below CurrPage.
            for p in range(int(TotalPage)):
                if p > 0 and p >= int(CurrPage):
                    driver.get(AliSFURL + str(p + 1));
                    time.sleep(1);
                    soup = BeautifulSoup(driver.page_source, "lxml");
                    SFList = SFListParse(soup)
                    if SFList:
                        PageListSave({"grabPage": str(p + 1), "spiderHouses": SFList});
                        StatusModify(Status=3)
                        print("Gpw List Success,Page=" + str(p + 1))

    except Exception as e:
        print("Parse List Error!!!!", e);
        WarningPrompt(Type=EList["E105"]);


# Single worker pass: crawl the list once, bail out if the browser is gone.
def ProvConsumer(CurrPage, driver, SeleniumH):
    """Run one list-crawl pass on the given browser session.

    Returns 0 on success; returns None when an exception was caught and
    logged. Exits the worker (sys.exit) if the browser was closed.
    """
    try:
        # A closed browser window is the kill switch: tear down Selenium
        # and end this worker outright (SystemExit is not caught below).
        if SeleniumH.Check() == False:
            SeleniumH.Quit()
            sys.exit(1)

        GetSFList(CurrPage, driver)
        return 0
    except Exception as err:
        print("queue QueueProv except info ", err)
    
# Worker entry point: set up a proxied browser, crawl, then clean up.
def ProvParse(proxyUrl, CurrPage, serverIp):
    """Thread-pool worker: open a proxied browser, run one crawl, clean up.

    Parameters
    ----------
    proxyUrl : str
        Proxy endpoint handed to PorxyInfo() (project helper; name sic).
    CurrPage :
        Resume page, forwarded to GetSFList() via ProvConsumer().
    serverIp : str
        Kept for interface compatibility with the retired RabbitMQ
        queue-per-server consumer; currently unused.
    """
    # Spin up a Selenium session behind the configured proxy.
    SeleniumH = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl));
    driver = SeleniumH.GetDriver();

    # Give the browser a moment to finish launching.
    time.sleep(2);

    ProvConsumer(CurrPage, driver, SeleniumH)

    # Release the browser session.
    SeleniumH.Quit();


# Script entry point: fan the list crawl out over a thread pool.
if __name__ == "__main__":
    # Pull the shared spider parameters (thread count, proxy, resume page...).
    ParaMap = GetSpiderParams()
    threadNum = ParaMap["threadNum"]

    # Over-subscribe the pool (threadNum workers, threadNum*20 queued tasks)
    # so that a worker which ends early (e.g. its browser was closed) is
    # replaced by the next queued task.
    with ThreadPoolExecutor(max_workers=threadNum) as pool:
        futures = [
            pool.submit(ProvParse, ParaMap['proxyUrlGpw'], ParaMap['grabPage'], ParaMap['serverIp'])
            for _ in range(threadNum * 20)
        ]
        # Block until every task has finished; result() re-raises any
        # exception that escaped a worker.
        for fut in as_completed(futures):
            data = fut.result()
            print("in main: get page {}s success".format(data))

    sys.exit(0)





