
import sys
import time
import random
import re

from Config import *
from SpiderUtil import *
from ProxyUtil import *
from RabbitUtil import RabbitUtil
from SeleniumUtil import SeleniumUtil
from SeleniumUtil import CheckDriverExit
from TimeUtil    import *

#selenium
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
#反爬
import undetected_chromedriver as uc
#BeautifulSoup
from bs4 import BeautifulSoup

from pymysql.converters import escape_string

from ErrorCode import *

#多线程处理
from threading import Thread
#线程池相关
from concurrent.futures import ThreadPoolExecutor, wait, as_completed


# Parse the sign-up / reminder / watcher counters into a dict.
def ParsePersonInfos(soup):
    """Return {'报名': ..., '提醒': ..., '围观': ...} from the counter element.

    The element text looks like "0人报名321人设置提醒2693次围观"; the three
    numbers always appear in that fixed order. Values stay as strings.
    Raises IndexError when fewer than three numbers are present.
    """
    counts = re.findall(r'\d+', soup.text)
    # Fixed page order: sign-ups, reminders, watchers
    return {'报名': counts[0], '提醒': counts[1], '围观': counts[2]}

# Parse the price table of the detail page into a name -> value dict.
def ParsePriceInfos(soup):
    """Build a price map from the detail page's price table.

    Each table row holds label/value span pairs; the label is span[0] and
    the displayed amount span[3]. After collection a few normalizations
    are applied:
      1. '评估价' (appraisal) falls back to '市场价' (market) or '0'.
      2. '起始价' (start price) is mirrored into '起拍价' (opening bid).
      3. '变卖价' (sell-off price) overrides '起拍价' when present.
    """
    prices = {}
    for row in soup.select('tr'):
        for cell in row.select('td'):
            spans = cell.select('span')
            if len(spans) >= 4:
                prices[spans[0].get_text()] = spans[3].get_text()

    # Normalization 1: appraisal price fallback
    if '评估价' not in prices:
        prices['评估价'] = prices.get('市场价', '0')
    # Normalization 2: '起始价' is the same thing as '起拍价'
    if '起始价' in prices:
        prices['起拍价'] = prices['起始价']
    # Normalization 3: a sell-off price wins as the opening bid
    if '变卖价' in prices:
        prices['起拍价'] = prices['变卖价']
    return prices

# Fetch the rendered source of one detail page.
def GetDetailPageSource(ItemID, driver, URL):
    """Open URL in the selenium driver and return the fully-loaded page source.

    Retries forever on fetch errors. Exits the whole process when the page
    title contains no Chinese characters (treated as a block/login page).

    Args:
        ItemID: numeric item id; the landing URL must end with "<ItemID>.htm".
        driver: selenium webdriver instance.
        URL: detail page URL to open.

    Returns:
        str: driver.page_source after scrolling to the bottom of the page.
    """
    # Loop until the page is fetched successfully
    while True:
        # Make sure the browser is still alive before each attempt
        CheckDriverExit(driver)

        try:
            # Random pacing so requests look less like a bot
            time.sleep(random.choice(range(10,30)));
            # Open the detail page
            driver.get(URL);

            # Generous wait: the site may interleave login/anti-bot pages
            time.sleep(random.choice(range(50,90)));

            # If we were redirected elsewhere, warn and keep waiting
            while not driver.current_url.endswith(f"{ItemID}.htm"):
                WarningPrompt(Type=EDetail["E201"]);
                time.sleep(5);
            
            # A title without Chinese characters means we are blocked: give up
            if not CheckChinese(driver.title):
                # Close the browser before terminating the process
                driver.quit()
                sys.exit(1);

            # Dismiss the "我知道了" confirmation dialog if present
            #<a class="btn btn-close" tabindex="12">我知道了</a>
            ele = driver.find_elements(By.CSS_SELECTOR, 'a.btn.btn-close');
            if len(ele) and ele[0].is_enabled() and ele[0].is_displayed():
                ele[0].click();
            else:
                ele = driver.find_elements(By.CSS_SELECTOR, 'div.message-bottom > span.close-modal');
                if len(ele) and ele[0].is_enabled() and ele[0].is_displayed():
                    ele[0].click();

            # Scroll down step by step so lazy-loaded sections (announcement
            # details etc.) are rendered before grabbing the source
            curr_height = 0;
            while True:
                curr_height+=800;
                driver.execute_script('window.scrollTo(0, arguments[0]);', curr_height);
                time.sleep(1.8);
                scroll_height = driver.execute_script('return document.body.scrollHeight')
                if curr_height >= scroll_height:
                    break;
            
            return driver.page_source;
        # BUG FIX: this was a bare `except:`, which also swallowed the
        # SystemExit raised by sys.exit(1) above, so the blocked-page
        # hard-exit never happened and the loop retried forever on a
        # quit driver. `except Exception` lets SystemExit (and
        # KeyboardInterrupt) propagate while keeping the retry behavior.
        except Exception:
            print("Detail Parse error! Try Again!!!");
            WarningPrompt(Type=EDetail["E202"]);
            time.sleep(5);
            continue;
    #end while True

# Detail page parser.
def GetDetailPageParse(HtmlContent, URL, ItemID):
    """Parse one auction detail page into a field dict and an image list.

    Args:
        HtmlContent: full HTML source of the detail page.
        URL: page URL, stored verbatim as OutMap['auctionUrl'].
        ItemID: numeric item id from the URL, stored as OutMap['nameMd5'].

    Returns:
        (OutMap, ImgList): OutMap maps camelCase field names to scraped
        values; ImgList holds the photo URLs found on the page.

    Raises:
        IndexError/AttributeError/KeyError when expected page sections are
        missing; the caller (SFOneParse) treats any exception as a parse
        failure.
    """
    soup = BeautifulSoup(HtmlContent, "lxml");

    #-------------- output containers --------------
    OutMap  = {}; # scraped field dict
    ImgList = []; # image URL list

    #-------------- locate the page sections --------------
    # Header/info area
    soupHead = soup.select(".grid-c")[0]; #class="grid-c"
    # Price area   pm-ui="mod.rule-info.idx-7"
    # soupPrice = soupHead.select('tbody#J_HoverShow')[0];
    
    soupPrice = soupHead.find('tbody', {'pm-ui': 'mod.rule-info.idx-7'})
    # Parse the price table into a label -> value map
    PriceMap = ParsePriceInfos(soupPrice);
    # Current price: the selector differs while the auction is live vs. ended
    select1 = soup.select('#J_PriceInput'); # auction in progress
    select2 = soup.select('#sf-price > div > p.i-info-wrap.i-left > span > em'); # auction ended
    if len(select1):
        PriceMap['当前价'] = select1[0]['value'];
    elif len(select2):
        PriceMap['当前价'] = select2[0].get_text();

    # Content/description area
    # NOTE(review): soupInfo is never used below — dead assignment?
    soupInfo = soup.select("#J_desc")[0];
    # Sign-up / reminder / watcher counters
    PersonMap = ParsePersonInfos(soupHead.select('div.remindNums--fXn5ITo2')[0]);
    # Property name (also reused as the address below)
    NameItem = soupHead.select('div.pm-main > h1');
    Name = NameItem[0].contents[-1].strip();
    
    # Photo thumbnails; swap the 80x80 thumbnail size for the 460x460 variant
    ImgItems = soup.select('ul[id="J_UlThumb"] li a img');
    ImgList.extend(['https:'+Img['src'].replace('80x80','460x460') for Img in ImgItems]);

    #-------------- field extraction --------------
    #'title'  //*[@id="page"]/div[4]/div/div/h1/text()
    OutMap['title'] = Name;
    # 'province' / 'city' / 'district' are not scraped here
    #OutMap['province'] = '广东';
    #OutMap['city']     = Area;
    #OutMap['district'] = '全市';
    #'address': the listing title doubles as the address
    OutMap['address']  = Name;
    #'auction_url'
    OutMap['auctionUrl'] = URL;
    #'priceYuan': opening bid, digits only (thousands separators stripped)
    if len(re.findall(r'([\d,]+)', PriceMap['起拍价'])):
        OutMap['priceYuan'] = re.findall(r'([\d,]+)', PriceMap['起拍价'])[0].replace(",","");
    
    #'dealPrice': current price, when present
    if '当前价' in PriceMap and len(re.findall(r'([\d,]+)', PriceMap['当前价'])):
        OutMap['dealPrice'] = re.findall(r'([\d,]+)', PriceMap['当前价'])[0].replace(",","");
    
    #'stage': auction round label (e.g. second auction)
    if len(soupHead.select('span[class="item-status"]')):
        OutMap['stage'] = soupHead.select('span[class="item-status"]')[0].get_text();
    #'status': running / upcoming / done, derived from the status span's CSS class
    select1 = soupHead.select('div[class="pm-bid-eye"] > span');
    if len(select1):
        statusClass = select1[0]["class"];
        statusMap = {"status-tip status-ing": Status.DOING.value, "status-tip": Status.TODO.value};
        if len(statusClass):
            classStr = " ".join(statusClass);
            if classStr in statusMap:
                OutMap['status'] = statusMap[classStr];
            else:
                # any other class combination means the auction is over
                OutMap['status'] = Status.DONE.value;
    else:
        OutMap['status'] = Status.DONE.value;
    
    # Look for the bid-over banner to refine the final status
    try:
        bidover = soupHead.select_one('div.pm-bid.bid-over');
        if bidover!=None:
            # "本场已流拍" => passed in (failed to sell)
            if re.search(r'本场已流拍', bidover.get_text()):
                OutMap['status'] = Status.FAILURE.value;
            # "本场已撤回" => withdrawn
            if re.search(r'本场已撤回', bidover.get_text()):
                OutMap['status'] = Status.REVOCATION.value;
            # "本场已暂缓" => suspended
            if re.search(r'本场已暂缓', bidover.get_text()):
                OutMap['status'] = Status.BREAK.value;
    except:
        pass;
    
    '''
    #'start_time', '开始时间'
    if OutMap['status'] == "即将开始":
        TimeList = soup.select('#sf-countdown > span:nth-child(3)');
        if len(TimeList):
            #解析时间串 "(2023-04-17 10:00开拍)""
            TimeStr = re.findall(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2})', TimeList[0].get_text());
            if len(TimeStr):
                OutMap['start_time'] = TimeStr[0] + ":00";
                #结束时间设置
                #'end_time', '结束时间'
                OutMap['end_time'] = TimeStrAddHour(OutMap['start_time'], Hours=24);
    #end if
    '''
    # Try to read start/end times from the countdown element's data
    # attributes (epoch milliseconds — hence the /1000)
    try:
        TimeInfo = soup.select_one(".J_PItem");
        # start time
        OutMap["startTime"] = TS2TimeStr(float(TimeInfo["data-start"])/1000);
        # end time
        OutMap["endTime"] = TS2TimeStr(float(TimeInfo["data-end"])/1000);
    except:
        pass;
    
    #'createTime': current system time
    OutMap['createTime'] = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()));
    #'browseCount': watcher count
    OutMap['browseCount'] = PersonMap['围观'];
    #'subscriberCount': reminder count
    OutMap['subscriberCount'] = PersonMap['提醒'];
    #'participantCount': sign-up count
    OutMap['participantCount'] = PersonMap['报名'];
    #'estimatePriceYuan': appraisal price
    if '评估价'in PriceMap and len(re.findall(r'([\d,]+)', PriceMap['评估价'])):
        OutMap['estimatePriceYuan'] = re.findall(r'([\d,]+)', PriceMap['评估价'])[0].replace(",","");
    
    #'marketPriceYuan': prefer the current price, fall back to the opening bid
    if '当前价' in PriceMap and len(re.findall(r'([\d,]+)', PriceMap['当前价'])):
        OutMap['marketPriceYuan'] = re.findall(r'([\d,]+)', PriceMap['当前价'])[0].replace(",","");
    elif '起拍价' in PriceMap and len(re.findall(r'([\d,]+)', PriceMap['起拍价'])):
        OutMap['marketPriceYuan'] = re.findall(r'([\d,]+)', PriceMap['起拍价'])[0].replace(",","");
    
    #'discountRate': market / appraisal * 10, only for plausible appraisals
    if 'estimatePriceYuan' in OutMap and 'marketPriceYuan' in OutMap and float(OutMap['estimatePriceYuan']) > 10000:
        OutMap['discountRate'] = round(float(OutMap['marketPriceYuan'])*10 / float(OutMap['estimatePriceYuan']), 2);
    
    #'depositYuan': bid deposit
    if '保证金' in PriceMap and len(re.findall(r'([\d,]+)', PriceMap['保证金'])):
        OutMap['depositYuan'] = re.findall(r'([\d,]+)', PriceMap['保证金'])[0].replace(",","");
    
    #'increasePriceYuan': bid increment
    if '加价幅度' in PriceMap and len(re.findall(r'([\d,]+)', PriceMap['加价幅度'])):
        OutMap['increasePriceYuan'] = re.findall(r'([\d,]+)', PriceMap['加价幅度'])[0].replace(",","");
    
    #'residentialQuarterName': reuse the listing title
    OutMap['residentialQuarterName'] = Name;

    # Floor area ('builtUpAreaSquareCentimeter'): scraped from the free-text
    # detail tab by trying increasingly generic keywords, then unit symbols
    # AllText = re.sub(r"\s+", "", soup.get_text());
    
    # AllText = soup.get_text();
    AllText = soup.select_one('#J_DetailTabMain').get_text();
    
    # Most specific first: "房屋建筑面积" (building floor area of the house)
    Keyword = '房屋建筑面积';
    MianJi = re.findall(r''+Keyword+'.\D{0,20}(\d+\.?\d*)', AllText.replace(Keyword, Keyword+' '));
    
    if len(MianJi)==0:
        # "建筑面积" (building floor area)
        Keyword = '建筑面积';
        MianJi = re.findall(r''+Keyword+'.\D{0,20}(\d+\.?\d*)', AllText.replace(Keyword, Keyword+' '));
    if len(MianJi)==0:
        # "面积" (area)
        Keyword = '面积';
        MianJi = re.findall(r''+Keyword+'.\D{0,20}(\d+\.?\d*)', AllText.replace(Keyword, Keyword+' '));
    if len(MianJi)==0:
        # number followed by the ㎡ symbol
        MianJi = re.findall(r'(\d+\.?\d*)\s*㎡', AllText);
    if len(MianJi)==0:
        # number followed by "平方米" (square meters)
        MianJi = re.findall(r'(\d+\.?\d*)\s*平方米', AllText);
    
    # Court: taken from the bank-account lines of the notice section
    # (prefixes mean "account name:" / "account holder:" variants)
    NoticeDetail = soup.select_one("#J_ItemNotice");
    if NoticeDetail!=None:
        prefixCourt = "账户名称：,开户名称：,户名：";
        for prefix in prefixCourt.split(","):
            # capture the run of Chinese characters after the prefix
            pattern = prefix + ".*?([\u4e00-\u9fa5]+)"
            # match = re.search(pattern, NoticeDetail.decode_contents())
            match = re.search(pattern, NoticeDetail.text)
            if match:
                OutMap['court'] = match.group(1);
                break;
    
    # Keep the largest plausible area candidate (between 1 and 10000)
    if len(MianJi):
        MianjiTemp = -1;
        for item in MianJi:
            if float(item) > 1 and float(item) < 10000:
                if float(item) > MianjiTemp:
                    MianjiTemp = float(item)
        OutMap['builtUpAreaSquareCentimeter'] = MianjiTemp;
        #'unitPriceYuan': price per unit area
        if 'marketPriceYuan' in OutMap and float(OutMap['builtUpAreaSquareCentimeter'])> 0.1:
            OutMap['unitPriceYuan'] = round(float(OutMap['marketPriceYuan']) / float(OutMap['builtUpAreaSquareCentimeter']), 2);
            # print("面积=%s 单价=%s"%(OutMap['builtUpAreaSquareCentimeter'],OutMap['unitPriceYuan']));
    #else:
    #    print("No 面积！！");

    # Not scraped here: house_level, use, free_up_status, viewing_detail,
    # taxation, phone, traffic, education, hospital, amenity, house_type,
    # upload_time

    #'origin': fixed data-source marker ("Alibaba judicial auction")
    OutMap['origin'] = "阿里法拍";
    #'announcement': auction announcement HTML
    NoticeItems = soup.select('#NoticeDetail');
    if len(NoticeItems):
        # OutMap['announcement'] = escape_string(str(NoticeItems[0]));
        OutMap['announcement'] = str(NoticeItems[0]);

        # The announcement text may carry more precise start/end times;
        # prefer them over the countdown attributes parsed above
        startTime, endTime = extract_start_end_time(OutMap['announcement'])
        if startTime and endTime:
            OutMap["startTime"] = startTime
            OutMap["endTime"] = endTime

    #'notice': bidding instructions HTML
    NoticeItems = soup.select('#ItemNotice');
    if len(NoticeItems):
        # OutMap['notice'] = escape_string(str(NoticeItems[0]));
        OutMap['notice'] = str(NoticeItems[0]);
    #'description': item description HTML
    NoticeItems = soup.select('#J_ItemDetailContent');
    if len(NoticeItems):
        # OutMap['description'] = escape_string(str(NoticeItems[0]));
        OutMap['description'] = str(NoticeItems[0]);


    # OutMap['update_time'] = OutMap['create_time']; # used when updating
    #'nameMd5': the numeric item id doubles as the unique key
    OutMap['nameMd5'] = ItemID;

    # banner: the first image, when any were found
    if len(ImgList):
        OutMap['banner'] = ImgList[0];
    return (OutMap, ImgList);


def _FormatCnDatetime(raw):
    """Convert a Chinese datetime like "2023年4月17日10时" (optionally with
    "30分") into "2023-4-17 10:00:00" / "2023-4-17 10:30:00"."""
    # Digits appear in order: year, month, day, hour[, minute]
    nums = re.findall(r'\d+', raw)
    minute = nums[4].zfill(2) if len(nums) > 4 else '00'
    return "%s-%s-%s %s:%s:00" % (nums[0], nums[1], nums[2], nums[3], minute)


def extract_start_end_time(html_content):
    """Extract the auction start/end datetimes from announcement HTML.

    Looks for a range like "2023年4月17日10时至2023年4月18日10时" (an optional
    trailing "NN分" minute part is also accepted) and returns the pair as
    "Y-M-D H:MM:00" strings, or (None, None) when no range is found.
    """
    # Announcements often write "…时起至…": drop '起' so the regex sees "时至"
    html_content = html_content.replace('起', '')
    # Date-time range pattern, minutes optional on either side
    pattern = (r'(\d{4}年\d{1,2}月\d{1,2}日\d{1,2}时(?:\d{1,2}分)?)'
               r'至'
               r'(\d{4}年\d{1,2}月\d{1,2}日\d{1,2}时(?:\d{1,2}分)?)')

    match = re.search(pattern, html_content)
    if not match:
        return None, None
    # Same formatting applied to both ends (was duplicated inline before)
    return _FormatCnDatetime(match.group(1)), _FormatCnDatetime(match.group(2))

# Parse a single auction asset page.
def SFOneParse(driver, URL):
    """Fetch and parse one auction item page.

    Returns (OutMap, ImgList) on success, or (None, None) when fetching or
    parsing raised — the caller decides whether to retry.
    """
    try:
        # The numeric item id is the last path segment before ".htm"
        item_id = re.findall(r"(\d+)\.htm", URL)[0]
        # Fetch the fully rendered page source
        page_html = GetDetailPageSource(item_id, driver, URL)
        # Turn the HTML into structured data
        return GetDetailPageParse(page_html, URL, item_id)
    except Exception as err:
        print("Parse html error! Try Again!", URL, err)
        WarningPrompt(Type=EDetail["E203"])
        # Make sure the browser is still usable before giving control back
        CheckDriverExit(driver)
        time.sleep(10)
        return (None, None)


# Detail-queue consumer: process one auction URL message.
def DetailConsumer(AuctionUrl, driver, SeleniumH):
    """Consume one auction URL: scrape the page and persist the result.

    Returns 0 on success, 1 on any failure. Exits the process when the
    browser has been closed.
    """
    try:
        print("Consumer AuctionUrl=", AuctionUrl)
        # A closed browser means this worker cannot continue: release it
        # and terminate the task (SystemExit is not caught below)
        if SeleniumH.Check() == False:
            SeleniumH.Quit()
            sys.exit(1)

        # Optional "?suffix" carries the spider house id
        url_parts = AuctionUrl.split("?")

        # Scrape one property page
        (item_map, img_list) = SFOneParse(driver, url_parts[0])

        if item_map is None:
            # Scrape failed: flag the spider status accordingly
            StatusModify(Status=5)
            # PageDetailFail(url_parts[1]);
            return 1

        # Crawling works: mark the spider as running
        StatusModify(Status=3)

        # Attach image records to the item
        item_map["storageRelationships"] = [
            {"filename": img, "category": "banner", "relatedObjectId": item_map["nameMd5"]}
            for img in img_list
        ]
        if len(url_parts) > 1:
            item_map["spiderHouseId"] = url_parts[1]
        # Persist images and base info through the save API
        PageDetailSave(item_map)
        print("Save Data Success 围观人数= " + item_map["browseCount"])
        return 0
    except Exception as err:
        print("queue QueueDetail DetailConsumer except info ", err)
        return 1


# Detail-parsing worker thread.
def DetailParse(proxyUrl, serverIp):
    """Worker body: bind a browser to a per-server RabbitMQ queue and
    consume auction-detail URLs until the consumer returns.

    Args:
        proxyUrl: proxy endpoint handed to the selenium session.
        serverIp: this server's IP; dots become underscores in the queue name.
    """
    try:
        # Message-queue configuration
        mq_cfg = Config["RabbitMQ"]
        rabbit = RabbitUtil(host=mq_cfg["host"], user=mq_cfg["user"],
                            passwd=mq_cfg["passwd"], vhost=mq_cfg["vhost"])

        # Browser session behind the given proxy
        selenium_h = SeleniumUtil(ProxyInfo=PorxyInfo(proxyUrl))
        driver = selenium_h.GetDriver()

        # Give the browser a moment to come up
        time.sleep(2)

        # AuctionUrl = 'https://sf-item.taobao.com/sf_item/830578110595.htm';
        # DetailConsumer(AuctionUrl, driver, SeleniumH)

        def consume(AuctionUrl):
            # RabbitMQ callback: one message == one auction detail URL
            result = 1
            try:
                result = DetailConsumer(AuctionUrl.decode('utf-8'), driver, selenium_h)
            except Exception as err:
                print("queue item except info ", err)
            return result

        # Queue name is per server: base name + underscored IP
        queue_name = Config["Spider"]["alidqName"] + serverIp.replace(".", "_")
        rabbit.CreateQueues([queue_name])
        # Block here consuming detail URLs from RabbitMQ
        rabbit.RegistConsumer(queue_name, consume)

        # Release resources once the consumer returns
        rabbit.Close()
        selenium_h.Quit()
    except Exception as err:
        print("queue QueueDetail DetailParse except info ", err)
    time.sleep(5)


# Entry point: run the detail scraper with a pool of worker threads.
if __name__ == "__main__":
    # Spider parameters (thread count, proxy, server ip) from the backend
    params = GetSpiderParams()
    worker_count = params["threadNum"]

    with ThreadPoolExecutor(max_workers=worker_count) as pool:
        # Queue far more tasks than workers: when a thread dies (e.g. its
        # browser was closed) the pool immediately starts a fresh one
        pending = [
            pool.submit(DetailParse, params['proxyUrl'], params['serverIp'])
            for _ in range(worker_count * 20)
        ]
        #print(wait(pending, timeout=2.5));
        # Wait for every task to finish
        for future in as_completed(pending):
            outcome = future.result()
            print("in main: get page {}s success".format(outcome))

    sys.exit(0)
