import datetime
import json
import re
import time
from copy import deepcopy

import scrapy


class QnSpider(scrapy.Spider):
    """Qunar.com hotel spider.

    Crawl flow: city list (JSON) -> per-city hotel list (HTML page 1,
    JSON API for later pages) -> hotel detail page -> room prices ->
    user comments -> commenter nicknames.
    """
    name = 'qn'
    allowed_domains = ['qunar.com']
    # Portals: hotels / vacations / tickets — only the hotel portal is
    # currently enabled; the others are kept commented out.
    start_urls = ['https://hotel.qunar.com',
                  # "http://bnb.qunar.com",
                  # "https://dujia.qunar.com",
                  # "https://piao.qunar.com"
                  ]

    def start_requests(self):
        """Kick off the crawl: request the city-list JSON endpoint,
        carrying a hard-coded, logged-in session cookie.

        NOTE(review): the cookie values below are session-bound and will
        expire; they should eventually come from a login step or
        configuration rather than a pasted literal.
        """
        cookie_str = """
        QN1=00002b80306c2a2ce210b575; QN300=auto_4e0d874a; QN99=6207; QunarGlobal=10.86.213.148_-1d98207b_17546cc9603_2159|1603212867331; QN269=EA779DC012F411EB8470FA163E9C4675; QN601=2cafcfcfc9fd46c2c8b34ac87c5ca92b; _i=DFiEZPVbGD6wbb96-BIzXFWR2AOw; QN48=000030802f102a2ce2208137; fid=ac17e3e3-f4d0-42ce-a4a9-b6e4a90afad2; HN1=v1680a2aefa52eeb1bc8fe4830f8192865; HN2=quzqulzgrsckc; quinn=2825af1e87394e548ae81e4cda22b3a3aac90ee0fc41562cc6f983855e8a0f8010c9709ac05bfd49f6444259158e08ad; QN57=16032262491530.49894655373461605; QN58=1603226249150%7C1603226300474%7C2; QN42=uqfh9503; _q=U.qcezbce1357; _t=26831163; csrfToken=5plycBK9exIzMkBoTVdwzVxftWCuopCM; _s=s_XNPGVRAE5W6TXUOGJLHZX7FMN4; _v=hvnEbaNvEvXssKK7JuVfe3qhz7waE3ikmR-pI56G3Q_iF4pl5dEDGBgxlrH-0q1AT0mLqnpQgesI7jc_c7uoIgUPLv39yMIi4WvxnUI9Dnob1xktt8FVdQ8QpKEYglg7ck675W0oWsbsyhV-0wPvMp3-SbHcxPHcNogejJEZliit; QN205=organic; QN277=organic; _jzqa=1.3572027552134019000.1603226529.1603816277.1604014461.6; _jzqx=1.1603226529.1604014461.4.jzqsr=bnb%2Equnar%2Ecom|jzqct=/.jzqsr=hotel%2Equnar%2Ecom|jzqct=/render/ga_new%2Ejsp; __utma=183398822.684468910.1604014528.1604014528.1604014528.1; __utmz=183398822.1604014528.1.1.utmcsr=bnb.qunar.com|utmccn=(referral)|utmcmd=referral|utmcct=/city/changchun/; s_fid=0219B4156686C1B6-2EB086B79AE00E67; _qzja=1.1593813115.1603226528720.1603816277118.1604014461380.1604014461380.1604015041992..0.0.16.6; ls=%u5E7F%u5DDE; lsc=i-gyeonggi_do_gwangju; QN73=4063-4064; QN44=qcezbce1357; tabIndex=0; cityUrl=shenzhen; cityName=%25E6%25B7%25B1%25E5%259C%25B3; checkInDate=2020-11-01; checkOutDate=2020-11-02; _vi=19I_c4_f9kmnbrenOLobFypr6yY96SrjlqeJdMq6fvuQv2pSBeqfB7s1uQyU9UAZF6509D_tevd39FtbxF8W9u8d9XDRh5GtKLO78dEhM2Yy0Ks2MTHrYc61nIjcQ-ix-jCQfE2jnwVjQqiowO07B4KoI2VbTzbBpGeFEyRAKt9E; QN271=e9e87ea0-0ee4-4c93-b81a-751fd021b16f; QN267=57033652729fb1f41; 
__qt=v1%7CVTJGc2RHVmtYMTlvb3NQeXNxRlBPbnNtN3p4QTl1ZHNQSS92UnVEL1JkTGQyQ0tzUy8wSC9ieDBpUHBmS2Nwem02SC9SLzdXVXZIQlJvd21HbzN1M3lLNm02VXlDanYwK2xQTkJzTlBZMnBFY3ZscExMbHlmV1liMHhvaVBEVkFPWVVyMzhtUVFFMjFUN2lJTEd1RjhBPT0%3D%7C1604201491788%7CVTJGc2RHVmtYMS9DSXpZbm9BWHNBL1BMVzhxUU02VFpCTGtwT2RSZHdXbHA4NFFkRnBhYlo2VDBiMXNpMTczR1A1NVk2dy8wQlZFYWJIQ0FzYlBTRVE9PQ%3D%3D%7CVTJGc2RHVmtYMTlkZlpVNVZhZklDdDR0UldCZDBORHJNMlVCR1NlUHlQK0ljUjMvUTlObjVaRUJaZWJQemMvVmhJaDVUMmRyazlBMDZkUkVzZExvQzREeFgzOGlOR3p6US9GeVJqL0U4MDdMVmZBQlR4R3FKWm9tTWdjK0xGWFN6Y0lFUlhCYnZ4UGhmOHNXd1dVeEhFR0FHY01WdnYyNHc4MFNhRm91dTYxcUp3bzlCcXlUaUtXL04rL0Q0SlFsU05xU1o2TzVtNCs3NU03NEd3aGRQQ3U1aGRJbkNwZkgyWldzSUtUeG9iVHZ4UkhsN0ZXZ1M5ZWtoWk1scU45Z2pMZTZIZ041VGw4d2ppRlZuVkg5Z0hnSkFueUhGVko2OUtaMXlyTzhYbmhoNDBNbzNyMnBaUUlRcjMrTUNvSDV5TGdwMUdiam5mY0ZBQmVxYVMrRFQ0cUpTeS9zT05GLzJ1VGhkakE1ZTNwcGdwaTF6cSt6MUtFTHFCdS9VdkNxdDd5alpSSFE4UXRLTUI0ZHdsOVFIdzd4VUxXdzlzU083ZE5xSnBTb3loODZFbEg1eEcyZ1dmTlkzczRLRTAzUmNWVFJRbU0yZldvNTdGcE9BNVcrOEpUR2daSkw5QmxFSUZ0YkFjdFlkUDlNTUY1K0ZwUStYT21DUDVYalptYWNIQURlVkxuVjA2Uk92Zmt2NC92TXZ5aXBWbmVKaC92THl1b0MvQ1hlUHRhSUR5SHFXdjE1VzVuZE1MaDRkbHlUVGtiVTkwQmkzdFFHcWxpcSs3ZklWbDF5Z0pyazZqdFFJT3B1OHlaV3JtN1FSb2pBbzBrUDdPdTExTFk5cm8xY2hjYkVOSW5TRG5GWVVyZ2o0RTMxdFg5Ky9oNGtSemp2ODMva0ZKOTUvOEU9
        """
        # Parse "k1=v1; k2=v2; ..." robustly.  The triple-quoted literal
        # carries leading/trailing whitespace and an embedded newline, and
        # cookie values may themselves contain '='.  The previous
        # one-liner ({i.split("=")[0]: i.split("=")[1] ...}) produced keys
        # with stray whitespace/newlines and truncated any value at its
        # second '='.
        cookies_dict = {}
        for pair in cookie_str.split(";"):
            pair = pair.strip()
            if not pair:
                continue
            key, _, value = pair.partition("=")
            cookies_dict[key] = value
        url = "https://hotel.qunar.com/cityList.json"
        yield scrapy.Request(
            url,
            callback=self.parse,
            cookies=cookies_dict,
        )

    def __init__(self):
        """Start a headless Chrome instance shared by the spider.

        NOTE(review): self.driver is never used by any parse method in
        this file — it only gets torn down in close(); presumably
        leftover from an earlier rendering approach.
        """
        super().__init__()
        from selenium import webdriver

        # Headless mode: no visible browser window.
        chrome_opts = webdriver.ChromeOptions()
        chrome_opts.add_argument("--headless")
        self.driver = webdriver.Chrome(options=chrome_opts)
        self.driver.implicitly_wait(1)

    def close(self, reason):
        """Scrapy shutdown hook: tear down the Selenium session.

        Uses quit() rather than close(): close() only closes the current
        browser window and can leave the browser and chromedriver
        processes running — a resource leak on every spider shutdown.
        """
        self.driver.quit()

    def parse(self, response):
        """Turn the city list into one hotel-list request per city.

        NOTE(review): the live payload (json.loads(response.body)[1:]) is
        stubbed out with a single hard-coded city — apparently left over
        from debugging; the response argument is ignored.
        """
        # city_lists = json.loads(response.body)[1:]
        city_lists = [
            [{"key": "Y", "value": [{"name": "玉树", "url": "yushu"}]}]
        ]
        for letter_groups in city_lists:
            for group in letter_groups:
                for city in group["value"]:
                    # Full per-city hotel list url, e.g.
                    # http://hotel.qunar.com/cn/shenzhen
                    hotel_city_url = "http://hotel.qunar.com/cn/" + city["url"]
                    item = {
                        "location": {
                            "city_name": city["name"],
                            # path slug without the domain
                            "city_url": city["url"],
                            "hotel_city_url": hotel_city_url,
                        }
                    }
                    yield scrapy.Request(
                        url=hotel_city_url,
                        callback=self.parse_hotel_list,
                        meta={"item": deepcopy(item)},
                    )

    def get_date(self):
        from_date = datetime.datetime.now().strftime('%Y-%m-%d')
        to_date = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
        return from_date, to_date

    def parse_hotel_list(self, response):
        """Parse page 1 of a city's hotel list (server-rendered HTML).

        Yields one detail-page request per hotel found in the DOM, then
        POSTs to the /napi/list API for page 2 — pages >= 2 are served as
        JSON and handled by parse_hotel_list_info.
        """
        item = response.meta["item"]
        # NOTE: `html` is only used by the commented-out JSON approach below.
        html = response.body.decode()
        # Earlier approach (kept for reference): pull page-1 data from the
        # embedded window.INITIAL_STATE JSON instead of the DOM.
        # page_one_dada_json_str = re.findall(r'window.INITIAL_STATE={"list":(.*?),"fixTab":false', html)[0] + "}"
        # page_one_data = json.loads(page_one_dada_json_str)
        # item["hotel_total"] = page_one_data["tcount"]
        # for h in page_one_data["hotels"]:
        #     item["hotel_info"] = {}
        #     item["hotel_info"]["hotel_name"] = h["name"]
        #     item["hotel_info"]["hotel_dangci"] = h["dangciText"]
        #     item["hotel_info"]["score"] = h["score"]
        #     item["hotel_info"]["commentCount"] = h["commentCount"]
        #     item["hotel_info"]["price"] = h["currencySign"] + h["price"]
        #     item["hotel_info"]["location_info"] = h["locationInfo"]
        #     item["hotel_info"]["hotel_id"] = h["seqNo"]

        li_list = response.xpath('//div/ul[@id="hotel_lst_body"]/li')
        for li in li_list:
            # hotel_info is rebuilt for each <li>; the deepcopy at yield
            # time snapshots it into each request's meta.
            item["hotel_info"] = {}
            item["hotel_info"]["hotel_name"] = li.xpath('.//div//p[@class="name"]/a/text()').extract_first()
            item["hotel_info"]["hotel_dangci"] = li.xpath(
                './/div//p[@class="name"]/span[@class="type"]/text()').extract_first()
            item["hotel_info"]["score"] = li.xpath('.//div//p[@class="comm"]/span[@class="num"]/text()').extract_first()
            item["hotel_info"]["commentCount"] = li.xpath(
                './/div//p[@class="comm"]//span[@class="total"]/text()').extract_first()
            item["hotel_info"]["price"] = li.xpath('.//div//p[@class="price_new"]/a/text()').extract_first()
            item["hotel_info"]["location_info"] = li.xpath('.//div//p[@class="adress"]/text()').extract_first()
            item["hotel_info"]["activity"] = li.xpath('.//div[@class="cont"]/div/span/text()').extract()
            hotel_url = self.start_urls[0] + li.xpath('.//div//p[@class="name"]/a/@href').extract_first()
            item["hotel_info"]["hotel_id"] = item["location"]["city_url"] + "_" + hotel_url.split("-")[-1][0:-1]  # drop trailing slash
            # hotel detail page url
            item["hotel_info"]["hotel_detail_url"] = hotel_url
            yield scrapy.Request(
                url=hotel_url,
                callback=self.parse_hotel_detail,
                meta={"item": deepcopy(item)},
                headers={
                    'referer': item["location"]["hotel_city_url"],
                },
            )

        city_url = item["location"]["city_url"]
        # city_url = response.meta["city"]
        from_date, to_date = self.get_date()
        # POST body for the list API.  NOTE(review): vtoken and userName
        # are hard-coded from a captured session and will likely go stale.
        request_data = {
            "b": {"bizVersion": "17", "cityUrl": city_url, "cityName": item["location"]["city_name"],
                  "fromDate": from_date,
                  "toDate": to_date, "q": "", "qFrom": 3, "start": 20, "num": 20, "minPrice": 0, "maxPrice": -1,
                  "level": "", "sort": 0, "cityType": 1, "fromForLog": 1, "uuid": "", "userName": "qcezbce1357",
                  "userId": "", "searchType": 0, "hourlyRoom": False, "locationAreaFilter": [],
                  "comprehensiveFilter": [], "vtoken": "pclist-v1-89412dc54851e3f68e034ceba77df341", "channelId": 1}}

        # Page 1 shows fewer hotels than the total, so request page 2
        # (sometimes unavailable, possibly due to page-loading issues).
        # if len(page_one_data["hotels"]) < item["hotel_total"]:
        # post_data_str = re.findall(r'"requestParam":(.*?),"seo"', html)[0]
        # print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        # post_data = json.loads(post_data_str)
        # post_data.update(start=20)
        # request_data = {"b": post_data}

        # From page 2 onwards this API url is requested
        url = "http://hotel.qunar.com/napi/list"
        # start with page 2
        yield scrapy.Request(
            url,
            method="POST",
            body=json.dumps(request_data),
            callback=self.parse_hotel_list_info,
            headers={
                'content-type': 'application/json;charset=UTF-8',
                'referer': response.url,
                'origin': self.start_urls[0]
            },
            meta={"item": deepcopy(item)}
        )

    def parse_hotel_list_info(self, response):
        """Parse one JSON page (page >= 2) of the city hotel-list API.

        Yields a detail-page request per hotel, then re-POSTs the API for
        the remaining pages.

        NOTE(review): every page's response schedules ALL remaining pages
        (range(40, tcount, 20)), so later pages are requested repeatedly;
        scrapy's default dupefilter (method+url+body fingerprint) is what
        keeps the repeats from being fetched — confirm this is intended.
        """
        item = response.meta["item"]
        # NOTE(review): city_url is assigned but never used below.
        city_url = item["location"]["city_url"]
        res_data = json.loads(response.body)
        total_hotel = 0
        # if res_data["bstatus"]["code"] != 0:
        #     pass

        # From page 2 onwards the data comes back in this JSON shape
        # (different from the server-rendered page 1).
        if res_data["bstatus"]["code"] == 0:
            total_hotel = res_data["data"]["tcount"]
            for hotel in res_data["data"]["hotels"]:
                item["hotel_info"] = {}
                item["hotel_info"]["hotel_name"] = hotel["name"]
                item["hotel_info"]["hotel_dangci"] = hotel["dangciText"]
                item["hotel_info"]["score"] = hotel["score"]
                item["hotel_info"]["commentCount"] = hotel["commentCountDesc"]
                # Parses as: (sign + price) if price else fallback text.
                item["hotel_info"]["price"] = hotel["currencySign"] + hotel["price"] if hotel["price"] else "暂无报价"
                item["hotel_info"]["location_info"] = hotel["locationInfo"]
                hotel_id = hotel["seqNo"]
                item["hotel_info"]["hotel_id"] = hotel_id
                activities = []
                if hotel.get("activity"):
                    for activity in hotel.get("activity"):
                        act = activity.get("label")
                        activities.append(act)
                item["hotel_info"]["activity"] = activities if len(activities) != 0 else "无"

                detail_url = item["location"]["hotel_city_url"] + "/dt-" + hotel_id.split("_")[-1]
                item["hotel_info"]["hotel_detail_url"] = detail_url
                # print(item)

                yield scrapy.Request(
                    url=detail_url,
                    callback=self.parse_hotel_detail,
                    meta={"item": deepcopy(item)},
                    headers={
                        'referer': item["location"]["hotel_city_url"],
                    },
                )

            # number of hotels on this page
            num = len(res_data["data"]["hotels"])
            post_data = res_data["data"]["requestParam"]
            # fewer than a full page (20) of hotels: stop paginating
            if num < post_data["num"]:
                return
            # post_data is mutated in place every iteration; json.dumps
            # runs at yield time, so each request serializes the current
            # "start" offset.
            for start in range(40, total_hotel, 20):
                post_data.update(start=start)
                next_request_data = {"b": post_data}

            # from_date, to_date = self.get_date()
            # # this url starts at page 2; pagination
            # for start in range(40, total_hotel, 20):
            #     next_request_data = {
            #         "b": {"bizVersion": "17", "cityUrl": city_url, "cityName": item["location"]["city_name"],
            #               "fromDate": from_date, "toDate": to_date, "q": "", "qFrom": 3, "start": start, "num": 20,
            #               "minPrice": 0, "maxPrice": -1, "level": "", "sort": 0, "cityType": 1, "fromForLog": 1,
            #               "uuid": "", "userName": "qcezbce1357", "userId": "", "searchType": 0, "hourlyRoom": False,
            #               "locationAreaFilter": [], "comprehensiveFilter": [],
            #               "vtoken": "pclist-v1-89412dc54851e3f68e034ceba77df341", "channelId": 1}}
                yield scrapy.Request(
                    response.url,
                    method="POST",
                    body=json.dumps(next_request_data),
                    callback=self.parse_hotel_list_info,
                    headers={'content-type': 'application/json;charset=UTF-8',
                             'referer': item["location"]["hotel_city_url"],
                             'origin': self.start_urls[0]
                             },
                    meta={"item": deepcopy(item)}
                )

    def parse_hotel_detail(self, response):
        """Scrape the hotel detail page into item["about"] (review count,
        phone, room total, intro, policies, services), then POST to the
        detailPrice API for room-level prices.
        """
        item = response.meta["item"]
        # about_hotel = re.findall('"dinfo":(.*?),"hotSaleCard"', response.body.decode())  # data is sometimes unavailable
        item["about"] = {}
        item["about"]["comm_total"] = response.xpath("//div[@class='detail_top']//p[@class='total']/text()").extract()
        about_list = response.xpath("//div[@class='hotel_desc']")
        for about in about_list:
            item["about"]["about_hotel_phone"] = about.xpath("./dl/dd/text()").extract_first()
            item["about"]["about_total_rooms"] = about.xpath("./dl/dd/span/text()").extract_first()
            item["about"]["about_hotel_intro"] = about.xpath("./dl/dd[@class='cont']/div/p/text()").extract_first()

            # Hotel policies: one {policy_name: [entries]} dict per section.
            hotel_policys = about.xpath("./div[1]/div")
            hotel_policy_list = []
            for policy in hotel_policys:
                policy_list = []
                policy_name = policy.xpath(".//dt/text()").extract_first()
                policy_item = policy.xpath(".//dd/text()").extract()
                policy_list.append({policy_name: policy_item})
                hotel_policy_list += policy_list
            item["about"]["hotel_policys"] = hotel_policy_list

            # Facilities/services: one {service_name: [entries]} dict per group.
            hotel_services = about.xpath("./div[2]/dl")
            hotel_service_list = []
            for service in hotel_services:
                service_list = []
                service_name = service.xpath("./dt/text()").extract_first()
                service_item = service.xpath("./dd/p/span[2]/text()").extract()
                service_list.append({service_name: service_item})
                hotel_service_list += service_list
            item["about"]["hotel_services"] = hotel_service_list

        url = "http://hotel.qunar.com/napi/detailPrice"
        from_date, to_date = self.get_date()
        # post_data = re.findall('"queryParam":(.*?)',"timestamp",response.text)[0]
        # NOTE(review): userName below is hard-coded from a captured session.
        request_data = {"b": {"cityUrl": item["location"]["city_url"], "fromDate": from_date, "toDate": to_date,
                              "ids": item["hotel_info"]["hotel_id"], "userName": "qcezbce1357", "userId": "",
                              "fromForLog": 0,
                              "preListPrice": "", "preListAvgPrice": "", "preListDiscount": "",
                              "preListType": 0, "extra": "", "resultExtraInfo": "", "channel": 0,
                              "onlyTeamRoom": False, "showHotelRec": True, "isTujiaHotel": False,
                              "checkParam": "", "priceType": 0, "bizVersion": 18, "hourlyRoom": False}}
        yield scrapy.Request(
            url,
            method="POST",
            body=json.dumps(request_data),
            callback=self.parse_hotel_rooms,
            headers={'content-type': 'application/json',
                     'referer': item["hotel_info"]["hotel_detail_url"]
                     },
            meta={"item": deepcopy(item)}
        )

    def parse_hotel_rooms(self, response):
        """Parse the detailPrice JSON into item["rooms"], then request
        the first page of user comments for this hotel.

        If the response carries no "data" key, the item is dropped
        silently (nothing is yielded) — same as the original behavior.
        """
        item = response.meta["item"]
        rooms_data = json.loads(response.body).get("data")
        if not rooms_data:
            return

        parsed_rooms = []
        for room in rooms_data.get("rooms") or []:
            entry = {
                "room_name": room.get("roomName"),
                "room_price": room.get("mprice"),
                "room_origin_price": room.get("originPriceDesc"),
                "room_bed_type": room.get("bedType"),
                "room_window": room.get("window"),
                "room_info": room.get("roomInfoList"),
            }
            # Room facilities arrive grouped by type; flatten each group
            # into {type: [item, ...]}.
            facility_groups = []
            for group in room.get("roomFloatFacilities") or []:
                names = []
                for data in group.get("datas"):
                    names.append(data["item"])
                facility_groups.append({group.get("type"): names})
            entry["room_facilities"] = facility_groups
            parsed_rooms.append(entry)

        item["rooms"] = parsed_rooms
        url = "http://hotel.qunar.com/napi/ugcCmtList?hotelSeq={}&page=1&onlyGuru=false&rate=all&sort=hot".format(item["hotel_info"]["hotel_id"])
        yield scrapy.Request(
            url,
            callback=self.parse_comments,
            meta={"item": deepcopy(item)},
            headers={
                "referer": item["hotel_info"]["hotel_detail_url"]
            }
        )

    def parse_comments(self, response):
        """Parse one page of the comment-list API.

        Collects per-comment fields into item["comments"], requests the
        nickname lookup for the collected feedOids, and schedules the
        remaining comment pages.
        """
        item = response.meta["item"]
        res = json.loads(response.body).get("data")
        # A hotel may have no comments at all -> no "data" in the payload.
        if not res:
            return

        item["comments"] = {}
        item["comments"]["comment_count"] = res.get("count")
        ratingStat = res.get("ratingStat")
        if ratingStat:
            item["comments"]["positiveCount"] = ratingStat.get("positiveCount")  # positive reviews
            item["comments"]["neutralCount"] = ratingStat.get("neutralCount")  # neutral reviews
            item["comments"]["negativeCount"] = ratingStat.get("negativeCount")  # negative reviews

        item["comments"]["comment_list"] = []
        comment_list = []
        feedOid_list = []
        comment_data = res.get("list")
        if comment_data:
            for comment in comment_data:
                comments = {}
                feedOid = comment.get("feedOid")
                comments["feedOid"] = feedOid
                feedOid_list.append(feedOid)

                # The API nests the comment body as a JSON string.
                comment_contents = comment.get("content")
                if comment_contents:
                    contents = json.loads(comment_contents)
                    comments["title"] = contents.get("title")
                    comments["content"] = contents.get("feedContent")
                    comments["modtime"] = contents.get("modtime")
                    comments["room_type"] = contents.get("roomType")
                    comments["evaluation"] = contents.get("evaluationDesc")
                    comments["describe"] = contents.get("subScores")

                labels = comment.get("label")
                if labels:
                    label = json.loads(labels)
                    # Either list may be missing; `or []` avoids the
                    # TypeError the bare concatenation raised on None.
                    comments["extra_evaluation"] = (label.get("positive") or []) + (label.get("negative") or [])
                comment_list.append(comments)

        item["comments"]["comment_list"] += comment_list

        # Resolve non-anonymous nicknames for this page's comments
        # (skip None feedOids, which would break the join).
        name_url = "http://hotel.qunar.com/napi/ugcCmtStat?feedOids={}".format(
            ",".join(oid for oid in feedOid_list if oid))
        yield scrapy.Request(
            name_url,
            callback=self.parse_comment_name,
            meta={"item": deepcopy(item)},
            headers={
                'Referer': item["hotel_info"]["hotel_detail_url"]
            }
        )

        # Schedule the remaining comment pages.  total_pages is a plain
        # ceiling division; it replaces the previous modulo/branch maze
        # (which computed the same bound) and its misspelled
        # `per_page_conut` variable.
        per_page_count = 10
        count = res.get("count") or 0
        total_pages = (count + per_page_count - 1) // per_page_count
        # NOTE(review): every page's response re-schedules pages
        # 2..total_pages; scrapy's default dupefilter drops the repeats.
        for page in range(2, total_pages + 1):
            url = "http://hotel.qunar.com/napi/ugcCmtList?hotelSeq={}&page={}&onlyGuru=false&rate=all&sort=hot".format(item["hotel_info"]["hotel_id"], page)
            yield scrapy.Request(
                url,
                callback=self.parse_comments,
                meta={"item": deepcopy(item)},
                headers={
                    "referer": item["hotel_info"]["hotel_detail_url"]
                }
            )

    def parse_comment_name(self, response):
        item = response.meta["item"]
        res = json.loads(response.body)
        comments = item["comments"]["comment_list"]
        for comment in comments:
            nick_name = res[comment["feedOid"]]["nick"]
            comment["nick_name"] = nick_name
        print(item)
