
import random
import re
import sys
import time
from Config import *
from ErrorCode import *
from ProxyUtil import *
from SeleniumUtil import CheckDriverExit
from SeleniumUtil import SeleniumUtil
from SpiderUtil import *
# anti-crawling countermeasures
# BeautifulSoup (HTML parsing)
from bs4 import BeautifulSoup
# multithreading
# thread-pool helpers
from concurrent.futures import ThreadPoolExecutor, as_completed
# selenium
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select


def StatusHandle(status):
    """Map a status-badge CSS class name to a human-readable auction status.

    status: the icon's CSS class string, e.g. 'sfpm_icon_jjz'.
    Returns the Chinese status label, or '未知' when no token matches.
    """
    # Ordered so that more specific class tokens are tested BEFORE their
    # prefixes: 'sfpm_icon_jjzt' must be checked before 'sfpm_icon_jjz',
    # otherwise a suspended item ("已暂缓") is misreported as "进行中"
    # because 'sfpm_icon_jjz' is a substring of 'sfpm_icon_jjzt'.
    status_tokens = (
        ('sfpm_icon_jjzt', '已暂缓'),
        ('sfpm_icon_qx', '已暂缓'),
        ('sfpm_icon_zh', '已暂缓'),
        ('sfpm_icon_jjks', '预告中'),
        ('sfpm_icon_jjjs', '已成交'),
        ('sfpm_icon_jjz', '进行中'),
        ('sfpm_icon_ch', '已撤回'),
        ('sfpm_icon_lp', '已流拍'),
        ('sfpm_icon_zz', '已中止'),
    )
    for token, label in status_tokens:
        if token in status:
            return label
    return '未知'

#---------------------------------------- Parse one list page (a list page holds up to 48 detail items) ----------------------------------------
# Parse the auction-house records out of a CBEX list page ("./html/SFList.html").
def SFListParse(HouseType, soup):
    """Extract judicial-auction house records from a parsed list page.

    HouseType: category label copied verbatim into every record.
    soup:      BeautifulSoup document of the list page.

    Returns a list of result dicts (possibly empty); entries that fail to
    parse are skipped after printing a diagnostic.
    """
    results = []

    # Nothing to do when the listing <ul> is absent.
    if not soup.select('ul#sfpmList'):
        return results

    for entry in soup.select('ul#sfpmList li'):
        try:
            # Detail-page link, e.g. /sfpm/detail/<code>.html — the code
            # in the path is the unique house identifier.
            href = entry.select_one("a")['href']
            code = re.search(r"/sfpm/detail/(\w+)\.html", href).group(1)

            # Listing title.
            title = entry.select('div.sfpm_cont a')[0].text.strip()

            # Human-readable status, derived from the badge icon's second
            # CSS class when the badge is present.
            Status = ''
            if entry.select_one('a div.xmbq i'):
                Status = StatusHandle(entry.select_one('a div.xmbq i')['class'][1])

            # Price paragraphs look like "<label>¥<amount>". Both the
            # current bid ("当前价") and the hammer price ("成交价")
            # populate currentPrice.
            CurrentPrice = ''
            EstimatePrice = ''
            for priceNode in entry.select('div.sfpm_cont div.sfpm_box p.sfpm_price'):
                compact = priceNode.text.strip().replace('\n', '').replace(' ', '')
                pieces = compact.split("¥")
                if len(pieces) != 2:
                    continue
                label, amount = pieces
                if '当前价' in label:
                    CurrentPrice = AmountHandle(amount)
                if '成交价' in label:
                    CurrentPrice = AmountHandle(amount)
                if '评估价' in label:
                    EstimatePrice = AmountHandle(amount)

            results.append({
                "houseCode": code,
                "auctionUrl": "https://otc.cbex.com" + href,
                "title": title,
                "currentPrice": CurrentPrice,
                "assessmentPrice": EstimatePrice,
                "houseType": HouseType,
                "status": Status,
                "origin": '北交互联',
            })
        except Exception as e:
            print("list parse except info ", e)

    return results



# Crawl the full judicial-auction asset list, page by page, per house type.
def GetSFList(CurrPage, HouseType, driver):
    """Iterate every house-type filter and every result page on CBEX.

    CurrPage:  optional page number to resume from; once reached it is
               cleared so all later pages are processed normally.
    HouseType: optional category name to resume from; cleared the same way.
    driver:    a live Selenium WebDriver.

    Each page is parsed with SFListParse; non-empty results are persisted
    via PageListSave and the job status is set to "in progress" via
    StatusModify. Any error is reported through WarningPrompt (E105).
    """
    try:
        CheckDriverExit(driver)

        CbexSFURL = "https://otc.cbex.com/page/sfpm/list/index.html"
        # Open the asset list page and let it settle.
        driver.get(CbexSFURL)
        time.sleep(3)

        # data-value of each "标的物类型" (asset type) filter link.
        htMap = {"住宅用房": "6",
                 "商业用房": "26",
                 "工业用房": "27",
                 "其他用房": "28"}

        for ht in htMap:
            # Resume support: skip categories until the requested one is
            # reached, then clear the flag so later categories also run.
            if HouseType is not None and HouseType != ht:
                continue
            HouseType = None

            # Click the asset-type filter and wait for the list to reload.
            element = driver.find_element(By.XPATH, '//dd[@id="bdwlx"]/a[@data-value="'+htMap[ht]+'"]')
            element.click()
            time.sleep(3)

            # Total page count = number of <option>s in the pager select.
            soup = BeautifulSoup(driver.page_source, "lxml")
            TotalPage = len(soup.select_one('select.pagecurpage').find_all('option'))

            for p in range(TotalPage):
                # Resume support: skip pages until CurrPage, then clear it.
                if CurrPage:
                    if p+1 != int(CurrPage):
                        continue
                CurrPage = None
                PageElement = driver.find_element(By.CSS_SELECTOR, 'select.pagecurpage')
                select = Select(PageElement)
                select.select_by_value(str(p+1))
                # Randomized delay to reduce the chance of being throttled.
                time.sleep(random.choice(range(3, 10)))

                soup = BeautifulSoup(driver.page_source, "lxml")
                SFList = SFListParse(ht, soup)
                if len(SFList):
                    # Persist this page's records via the backend API.
                    PageListSave({"grabPage": str(p+1), "houseType": ht, "spiderHouses": SFList})
                    # Mark the grab job as in progress.
                    StatusModify(Status=3)
                    print("CBEX List Success,Type="+ht+",Page="+str(p+1))

    except Exception as e:
        print("Parse List Error!!!!", e)
        WarningPrompt(Type=EList["E105"])


# Worker body: run a single list-grab pass on one browser session.
def ProvConsumer(CurrPage, HouseType, driver, SeleniumH):
    """Drive GetSFList once; hard-exit the task if the browser is gone.

    Returns 0 on success, or None when an exception was logged.
    Note: sys.exit raises SystemExit, which `except Exception` below does
    NOT catch, so the abort still propagates out of this function.
    """
    try:
        # If the operator closed the browser window, release resources
        # and terminate this task immediately.
        if SeleniumH.Check() == False:
            SeleniumH.Quit()
            sys.exit(1)
        GetSFList(CurrPage, HouseType, driver)
        return 0
    except Exception as e:
        print("queue QueueProv except info ", e)
    
# Per-thread entry: bring up a proxied browser, crawl once, tear down.
def ProvParse(proxyUrl, CurrPage, HouseType, serverIp):
    """Create a proxied Selenium session, run one crawl pass, then quit.

    serverIp is currently unused; it belonged to a disabled queue-based
    (RabbitMQ) consumer path.
    """
    # Read the RabbitMQ config section; the queue consumer is disabled,
    # but this preserves the original config-presence check.
    RabbitParam = Config["RabbitMQ"]

    # Selenium session routed through the given proxy.
    seleniumHandle = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl))
    webDriver = seleniumHandle.GetDriver()

    # Give the browser a moment to finish launching.
    time.sleep(2)

    ProvConsumer(CurrPage, HouseType, webDriver, seleniumHandle)

    # Release browser resources.
    seleniumHandle.Quit()


# Script entry point: fan out list-grab worker threads.
if __name__ == "__main__":
    # Fetch the spider's runtime parameters (proxy URL, resume page,
    # house type, thread count, server IP).
    ParaMap = GetSpiderParams()
    threadNum = ParaMap["threadNum"]

    # Thread pool sized by configuration.
    with ThreadPoolExecutor(max_workers=threadNum) as t:
        # Queue threadNum*20 identical tasks: if a worker's browser is
        # closed mid-run the task ends and a queued task takes its place.
        all_task = [ t.submit(ProvParse, ParaMap['proxyUrlCbex'], ParaMap['grabPage'], ParaMap['houseType'], ParaMap['serverIp']) for _ in range(threadNum*20) ];
        #print(wait(all_task, timeout=2.5));
        # Block until every task has finished.
        for future in as_completed(all_task):
            data = future.result();
            print("in main: get page {}s success".format(data));

    sys.exit(0);





