
import random
import re
import sys
import time
from Config import *
from ErrorCode import *
from ProxyUtil import *
from SeleniumUtil import CheckDriverExit
from SeleniumUtil import SeleniumUtil
from SpiderUtil import *
from TimeUtil import *
# 反爬
# BeautifulSoup
from bs4 import BeautifulSoup
# 多线程处理
# 线程池相关
from concurrent.futures import ThreadPoolExecutor, as_completed
# selenium
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select

#----------------------------------------Parse page: one list page holds up to 48 detail entries----------------------------------------
# Parse the auction-house list from a list page (e.g. "./html/SFList.html")
def SFListParse(soup, Status):
    """Parse one auction list page into a list of house-info dicts.

    Args:
        soup:   BeautifulSoup of a list page (expects 'div#project_1 div.product' cards).
        Status: status label of the list tab currently being scraped, e.g.
                '进行中' / '预告中' / '中止暂缓撤回' / '已成交' / '已流拍'.

    Returns:
        list of dicts ready for PageListSave(); items that fail to parse are
        skipped (logged), never raised.
    """
    SFInfoList = []

    # The card container only exists when the page rendered results
    if soup.select('div#project_1'):
        SFList = soup.select('div#project_1 div.product')

        for Idx, SF in enumerate(SFList):
            try:
                # BUG FIX: work on a per-item copy of the status. The original
                # reassigned the shared `Status` parameter inside the loop, so
                # once one item was refined to e.g. '已中止', every following
                # item skipped the '中止暂缓撤回' branches and got the wrong
                # price field and status.
                ItemStatus = Status

                # Detail-page link, e.g. //sf-item.taobao.com/sf_item/712418425380.htm?...
                href = SF.select_one("a")['href']
                # Site-side id of the card, used to build our houseCode
                code = SF.get('id')
                # House title
                title = SF.select_one('a')['title'].strip()

                CurrentPrice = ''
                StartPrice = ''
                # The single price shown on the card means "start price" for
                # terminated/failed auctions, "current price" otherwise.
                if SF.select('p.prod-price'):
                    if ItemStatus == '中止暂缓撤回' or ItemStatus == '已流拍':
                        StartPrice = AmountHandle(SF.select('p.prod-price')[0].text.strip())
                    else:
                        CurrentPrice = AmountHandle(SF.select('p.prod-price')[0].text.strip())

                # Refine the generic '中止暂缓撤回' tab label into the concrete
                # terminal status printed on the card.
                if ItemStatus == '中止暂缓撤回':
                    if SF.select('div.prod-alink'):
                        StatusTest = SF.select('div.prod-alink')[0].text
                        if '取消' in StatusTest or '中止' in StatusTest:
                            ItemStatus = '已中止'
                        if '暂缓' in StatusTest:
                            ItemStatus = '已暂缓'
                        if '撤回' in StatusTest:
                            ItemStatus = '已撤回'

                EstimatePrice = ''
                StartTime = ''
                EndTime = ''
                # Extra key/value lines on the card: start price, appraisal, start time
                GPaiInfos = SF.select('div.prod-guj p')
                if GPaiInfos:
                    for item in GPaiInfos:
                        PText = item.text.strip()
                        if '起拍价' in PText:
                            StartPrice = AmountHandle(PText.replace('起拍价', ''))
                        if '评估值' in PText:
                            EstimatePrice = AmountHandle(PText.replace('评估值', ''))
                        if '开始时间' in PText:
                            # TODO distinguish per status whether the string carries a year
                            StartTime = TimeStrFormatIn(PText.replace('开始时间', ''), '%m月%d日%H:%M')
                            # End time is assumed to be start + 1 day — TODO confirm
                            EndTime = change_date(StartTime, 1)

                # Normalized record for the persistence API
                resultTuple = {
                    "houseCode": 'ZCW_' + code,
                    "auctionUrl": "https://www1.rmfysszc.gov.cn/" + href,
                    "title": title,
                    "minPrice": StartPrice,
                    "currentPrice": CurrentPrice,
                    "assessmentPrice": EstimatePrice,
                    "startTime": StartTime,
                    "endTime": EndTime,
                    "status": ItemStatus,
                    "origin": '诉讼资产网',
                }
                SFInfoList.append(resultTuple)
            except Exception as e:
                # Best-effort parsing: one bad card must not kill the page
                print("list parse except info ", e)

    return SFInfoList



# Walk the judicial-auction house list on 诉讼资产网 and persist every page
def GetSFList(CurrPage, driver):
    """Scrape all auction statuses of the house list and save each page.

    Args:
        CurrPage: page hint from the spider parameters — passed in but not
            used for pagination here (TODO confirm against caller).
        driver:   a live Selenium WebDriver (may be proxied).

    Side effects: navigates the browser, calls PageListSave()/StatusModify()
    for every non-empty page; errors are reported via WarningPrompt().
    """
    # Short explicit wait handle for the freshly opened browser
    wait = WebDriverWait(driver, 2)

    try:
        CheckDriverExit(driver)

        CbexSFURL = "https://www1.rmfysszc.gov.cn/projects.shtml?dh=3&gpstate=1&wsbm_slt=1"
        # Open the asset list page; reload once so anti-bot redirects settle
        driver.get(CbexSFURL)
        time.sleep(10)
        driver.refresh()
        time.sleep(10)

        # Click the "house" asset-type filter (房屋)
        driver.find_elements(By.CSS_SELECTOR, 'div.search2')[0].find_elements(By.CSS_SELECTOR, 'div.search2_1 a')[1].click()
        time.sleep(3)

        # Click the "judicial auction" filter (司法拍卖)
        driver.find_elements(By.CSS_SELECTOR, 'div.search2')[3].find_elements(By.CSS_SELECTOR, 'div.search2_1 a')[1].click()
        time.sleep(3)

        # Status tabs 1-5: 正在进行 即将开始 中止暂缓撤回 拍卖成功 拍卖失败（流拍）
        StatusArr = ["进行中", "预告中", "中止暂缓撤回", "已成交", "已流拍"]

        # FIX: the original reused `i` for both this loop and the pagination
        # loop below, shadowing the status index — renamed for clarity.
        for StatusIdx in range(5):
            Status = StatusArr[StatusIdx]

            # Allows disabling a status by blanking it in StatusArr
            if Status == '':
                continue

            # Click the auction-status tab
            driver.find_elements(By.CSS_SELECTOR, 'div.search2')[4].find_elements(By.CSS_SELECTOR, 'div.search2_1 a')[StatusIdx + 1].click()
            time.sleep(3)

            # Click the search button and let results load
            driver.find_element(By.CSS_SELECTOR, 'input#search_sub').click()
            time.sleep(20)

            soup = BeautifulSoup(driver.page_source, "lxml")
            # Total page count is encoded in the "尾页" (last page) link's
            # onclick handler, e.g. onclick="post(37)".
            TotalPage = 0
            last_page_link = soup.find('a', text='尾页')
            if last_page_link:
                onclick_attr = last_page_link.get('onclick')
                TotalPage = int(onclick_attr.split('(')[1].split(')')[0])

            # Cap crawl depth per status
            if TotalPage > 100:
                TotalPage = 100

            for Page in range(TotalPage):
                if Page > 0:
                    # The site paginates via a javascript post(n) call
                    driver.execute_script('javascript:post(' + str(Page + 1) + ');')
                    # Random delay to look less like a bot
                    time.sleep(random.choice(range(3, 10)))

                soup = BeautifulSoup(driver.page_source, "lxml")
                SFList = SFListParse(soup, Status)
                if len(SFList):
                    # Persist this page through the ingestion API
                    PageListSave({"grabPage": str(Page + 1), "houseType": '', "spiderHouses": SFList})
                    # Flip the spider status to "running"
                    StatusModify(Status=3)
                    print("SSZCW List Success , Status=" + Status + " Page=" + str(Page + 1))

    except Exception as e:
        print("Parse List Error!!!!", e)
        WarningPrompt(Type=EList["E105"])


# Province worker: run one scrape pass with an existing browser session
def ProvConsumer(CurrPage, driver, SeleniumH):
    """Run one list-scrape pass; abort the whole worker if the browser died.

    Returns 0 on success, None if the scrape raised.
    """
    try:
        # If the browser window was closed by hand, tear down and end the task
        # (SystemExit is a BaseException, so the except below lets it escape).
        if not SeleniumH.Check():
            SeleniumH.Quit()
            sys.exit(1)

        GetSFList(CurrPage, driver)
        return 0
    except Exception as e:
        print("queue QueueProv except info ", e)
    
# Province entry point: one browser per task
def ProvParse(proxyUrl, CurrPage, serverIp):
    """Spin up a proxied Selenium browser, run one scrape pass, tear it down.

    Args:
        proxyUrl: proxy endpoint handed to the Selenium helper.
        CurrPage: starting-page hint forwarded to the scraper.
        serverIp: kept in the signature for the (disabled) RabbitMQ queue path.
    """
    RabbitParam = Config["RabbitMQ"]

    # Build the Selenium helper with an outbound proxy and grab its driver
    SeleniumH = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl))
    driver = SeleniumH.GetDriver()

    # Give the browser a moment to finish launching
    time.sleep(2)

    ProvConsumer(CurrPage, driver, SeleniumH)

    # Release browser resources
    SeleniumH.Quit()


# Script entry: fan out list-scraping workers over a thread pool
if __name__ == "__main__":
    # Base spider parameters (thread count, proxy, starting page, server ip)
    ParaMap = GetSpiderParams()
    threadNum = ParaMap["threadNum"]

    with ThreadPoolExecutor(max_workers=threadNum) as pool:
        # Queue many more tasks than workers so that when a worker dies
        # (e.g. its browser is closed), the next queued task replaces it.
        pending = [
            pool.submit(ProvParse, ParaMap['proxyUrlSszcw'], ParaMap['grabPage'], ParaMap['serverIp'])
            for _ in range(threadNum * 20)
        ]
        # Drain results as workers finish
        for future in as_completed(pending):
            data = future.result()
            print("in main: get page {}s success".format(data))

    sys.exit(0)





