import requests
import parsel
from tqdm import tqdm
import random
import time
import csv
import re


class MuniaoCrawler:
    """Crawler for muniao.com homestay listings.

    Given a listing-page URL, it walks pages 1-10 of that listing, collects
    each room's detail-page URL, then scrapes every detail page and appends
    one CSV row per room to ``homestay.csv``.
    """

    # Column order shared by every CSV row this crawler writes.
    FIELDNAMES = [
        'room_id', 'room_title', 'room_address', 'hx', 'cz', 'bed',
        'people', 'area', 'applause_rate', 'room_price',
    ]

    def __init__(self, url):
        # Listing-page URL; ``run`` rewrites the page number in it per page.
        self.url = url
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,ja;q=0.6,ko;q=0.5",
            "Referer": "https://www.muniao.com/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
        }
        # Session cookies captured from a logged-in browser session; these
        # expire, so re-capture them when requests start failing.
        self.cookies = {
            "SECKEY_ABVK": "yN6sMd5ARVvSUB7X+vkXdb85MW4RrPxfoAzGDHDuMqg%3D",
            "BMAP_SECKEY": "MXqX4OT-tHaH65jB1haMsdeaNYKKGyXNTNSuLFa5Uu72LCLizm_Oqf128nKJYFEmekL90aD45thZLA4hBtP6lNnzLYL-OfJWKW9relGuClDEL4-38hPtio6VfPsdyD3XLi5gtXTZnpJpPZCLzzldllOJis49A5boSzOggyn9HM98C4mJcu92iRcUxi2QFdPX",
            "sl-session": "anFLZYO68mWWbRsVqWmdQQ==",
            "Front_LoginUserKey": "85EFE19FA89BC480D9A151A2EBB7C2B964A976C7C811E31E55CC922667FCDFD39DDA0F1A124B3A8342A102566BBC4D7B2654ADD69E94DAF1A97E664623876B85948F11FB20D203FE152133823E1F7957D53F804B96CBB3EA1FE39D4392505870EAD2556543D044C98D7A077E4043B0B5A0D44C6A8F6DEF4A0CA8370DFB6DDA88A2222F21717722383A6549279A625DBA3C8EC87A17E1E5A54806748BFBF8C63A4BF510504425F56514DB8656DC356FCBE600F2C3196D35D9FCDB79FF12FE019A891AA6B811567DF0",
            "route": "e028bf8e2ee7f213721872762d23fbc1",
            "ASP.NET_SessionId": "lnlomv3t1jdzx4jq35uk3pr4"
        }
        # Raw HTML of the most recently fetched page.
        self.html = ""
        # Accumulated room detail-page URLs across all listing pages.
        self.all_homestay_list = []

    def send_request(self, url):
        """GET *url* with the crawler's headers/cookies.

        Returns the ``requests.Response``, or ``None`` on any request
        failure (the error is printed, not raised, so callers must cope
        with ``None``).
        """
        try:
            # timeout prevents a dead connection from hanging the crawl;
            # raise_for_status surfaces 4xx/5xx pages as errors instead of
            # silently parsing an error body.
            res = requests.get(url=url, headers=self.headers,
                               cookies=self.cookies, timeout=15)
            res.raise_for_status()
            return res
        except Exception as e:
            print(e)
            return None

    def get_list(self):
        """Fetch ``self.url`` (one listing page) and collect room URLs.

        Appends one detail-page URL per room found to
        ``self.all_homestay_list``. Failures are printed and skipped.
        """
        try:
            res = self.send_request(url=self.url)
            res.encoding = "utf-8"
            self.html = res.text
            selector = parsel.Selector(text=self.html)
            # Each <li data-id="..."> under #Lmain_con is one room.
            homestay_id = selector.css("#Lmain_con li::attr(data-id)").getall()
            url_list = [f'https://www.muniao.com/room/{x}.html' for x in homestay_id]
            self.all_homestay_list.extend(url_list)
        except Exception as e:
            print(e)

    def get_detail(self):
        """Scrape every collected detail page and persist one CSV row each.

        A failure on one page is printed and the crawl continues with the
        next page. Sleeps 1-3 s between requests to avoid rate limiting.
        """
        for homestay_url in tqdm(self.all_homestay_list, unit='条', desc='下载详情页中'):
            print(homestay_url)
            try:
                detail = {}
                res = self.send_request(url=homestay_url)
                res.encoding = "utf-8"
                self.html = res.text
                selector = parsel.Selector(text=self.html)
                # Raw string with escaped dots so '.' can't match arbitrary chars.
                detail['room_id'] = re.search(
                    r'https://www\.muniao\.com/room/(.*?)\.html', homestay_url).group(1)
                # Strip commas from the title so it can't break the CSV row.
                detail['room_title'] = selector.css('span.room_title a::text').get().replace(',', '')
                detail['room_address'] = selector.css('#ass::text').get().strip()
                detail['room_price'] = selector.css('div.room_price span.f30::text').get()
                # The five spec items appear in a fixed order: layout (hx),
                # rental type (cz), beds, capacity, floor area.
                hx, cz, bed, people, area = selector.css('ul.room_ttbottomL li::text').getall()
                detail.update({
                    'hx': hx,
                    'cz': cz,
                    'bed': bed,
                    'people': people,
                    'area': area,
                })
                detail['applause_rate'] = selector.css(
                    'ul.room_ttbottomR > li:nth-child(1) > p.f26::text').get().strip()
                self.save_data(detail)
                time.sleep(random.uniform(1, 3))
            except Exception as e:
                print(e)

    def save_data(self, detail_dict):
        """Append *detail_dict* as one row to homestay.csv (header assumed present)."""
        with open('homestay.csv', 'a', encoding='utf-8-sig', newline='') as f:
            csv_writer = csv.DictWriter(f, fieldnames=self.FIELDNAMES)
            csv_writer.writerow(detail_dict)

    def _page_url(self, page):
        """Return ``self.url`` with its trailing page number replaced by *page*.

        Uses a regex on the ``-<n>.html`` suffix, so it works for any
        current page number (the old last-character chop broke as soon as
        the page number had more than one digit).
        """
        return re.sub(r'-(\d+)\.html', f'-{page}.html', self.url)

    def run(self):
        """Crawl listing pages 1-10, then scrape all collected detail pages."""
        for page in tqdm(range(1, 11), desc='下载列表页中', unit='页'):
            self.url = self._page_url(page)
            print(self.url)
            self.get_list()
            time.sleep(random.uniform(1, 3))

        self.get_detail()


if __name__ == '__main__':
    from pathlib import Path

    # Write the CSV header only when the file is new/empty: the crawler
    # appends, so unconditional header writes (the old behavior) scattered
    # duplicate header rows through the data on every re-run.
    csv_path = Path('homestay.csv')
    if not csv_path.exists() or csv_path.stat().st_size == 0:
        with open('homestay.csv', 'a', encoding='utf-8-sig', newline='') as f:
            csv_writer = csv.DictWriter(f, fieldnames=[
                'room_id', 'room_title', 'room_address', 'hx', 'cz', 'bed', 'people', 'area', 'applause_rate', 'room_price'
            ])
            csv_writer.writeheader()

    # Iterate the room-layout filter (hx) 0-5 so each crawl targets a
    # distinct layout; a fixed hx made the scraped data largely duplicated.
    for hx in tqdm(range(0, 6), desc='下载不同户型中', unit='室'):
        hx_url = f"https://www.muniao.com/dali/null-0-0-{hx}-0-0-0-0-1.html?tn=mn19091015"
        spider = MuniaoCrawler(url=hx_url)
        spider.run()
