import os
import csv
import json
import time
import requests
from bs4 import BeautifulSoup

# --- Configuration ---
# URL template for Ke.com (Beike) Wuhan second-hand housing listings.
# Placeholders: {area} = district slug, {pageNum} = 1-based page index,
# {asset} = price-band code; the dp1/sf1/y2/l3l4l5l6 tokens are fixed
# site-specific listing filters.
BASE_URL = "https://wh.ke.com/ershoufang/{area}/pg{pageNum}dp1sf1y2l3l4l5l6p{asset}/"
# JSON file that persists the resume position between runs.
PROGRESS_FILE = "progress.json"
# Request headers captured from a real browser session.
# NOTE(review): the Cookie below embeds live session credentials
# (lianjia_token, security_ticket, ...). They expire, and committing
# them to source is a security risk — consider loading them from the
# environment or a gitignored config file instead.
HEADERS = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
  'Accept-Language': 'zh-CN,zh;q=0.9',
  'Connection': 'keep-alive',
  # BUGFIX: the cookie value previously spanned two physical lines inside
  # one single-quoted literal, which is a SyntaxError. Implicit adjacent
  # string concatenation rebuilds the identical value legally.
  'Cookie': ('SECKEY_ABVK=wWyI+aG/npT5eyfyIFqEEqZob+uhmga2v/J8RkzB8L0%3D; BMAP_SECKEY=vmKgUFdfheEn1o1G4Clz2cJ7BSdgljKdRoeUw1kAAVBrNswHQyOniqdwzjW35p6h9u9rD4snUZ5TYxl1ZsID_zLIySOiULzmNnoi1atUENLBzEpGMJtE9ifhCqfCBIUn-ctVbmKB6YyypOh6HfDalMcxyDgkxyrpVSjdi7Xk6LOlspuXQ8CmRDjyBHEYI8cv; lianjia_uuid=db215f42-cca3-4a42-8609-fb71f793124b; crosSdkDT2019DeviceId=-dnfkga--5y761b-1zzj7i9q9s3bj3h-2pmsodfb1; Hm_lvt_b160d5571570fd63c347b9d4ab5ca610=1751331231; ftkrc_=18bdd645-25f9-4ef8-a338-9d9335e75923; lfrc_=ec272f61-b7d5-4b8b-95bc-fc04b19a1702; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221979f7d726f22a4-0bfb323ac627a9-77144e16-1405320-1979f7d7270299c%22%2C%22%24device_id%22%3A%221979f7d726f22a4-0bfb323ac627a9-77144e16-1405320-1979f7d7270299c%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%7D; digv_extends=%7B%22utmTrackId%22%3A%22%22%7D; select_city=420100; lianjia_ssid=76113b35-3791-4bc9-8dd4-0fc3e0efdcc9; login_ucid=2000000223631814; lianjia_token=2.0015912a5770661e7d043c0366de713c23; lianjia_token_secure=2.0015912a5770661e7d043c0366de713c23; security_ticket=Saz9XCcaeE8toMy5yaNcxlTbtKwt7EqcVYSRMRevY7KjRqnXJy35fA+t52STDxrDqZk3JmhaGBayZat8Vg8dehtBcqHIV6CHiZfWZGU4YOnLbCatCO1Lp15ZJJRN8ksDG6mTY4cKdrDn1viF5tYgClm7PS1iagreCV5JS5AUcv0=; hip=UBoVcyzW5sKVk-Q1P4CkPJQVvMFG0nRkjo_uZMdY1JSNxeDrHFEimwMCUChZ-jn5g11Dvz0C7xzU6CxKx01Jb_vpNwBG-Kw-XampT3voy4lgX1OGPnZeB_Z9n8J-Uf7xzHxaq0UMXTQ-wAD4WClqGMHuraSwE3J01eN4ln22FUPzjgamPNA%3D; '
             'srcid=eyJ0Ijoie1wiZGF0YVwiOlwiZmVlMTEyZDdmOTcxYWVmYTZlODg3NDdmMWE0ODBhMDU2YWUxMTgzMzI5MTBmY2VkYWM2OTRlY2EzYmViZmQ5ZWFjMmU4MzM2MTcxZjcyNGQxMGQ3NDE2MmNiNzYwZGMwOTdlMzk3NjU3ZmFhNjVhMDAyMWI4OTc3NTQxZWE3N2EzNmEyNjA5M2Y3MWRlYTM0YWZhMGViZDk3MTUyYWU5MTM1MWYxMWJiNDUzYTdkNzMwYWI4Y2JlYjY2YzA4NTQwMGE4MjEwOWE2MTg0MWY4ZDA1ZDNlZTZjM2E1YzllOTc5MGZlZGViODBhZWJiMzhjODA0NDk2MTEzYWMzNzkwZVwiLFwia2V5X2lkXCI6XCIxXCIsXCJzaWduXCI6XCIwOGNiNjNmOFwifSIsInIiOiJodHRwczovL3doLmtlLmNvbS9lcnNob3VmYW5nL2ppYW5nYW4vIiwib3MiOiJ3ZWIiLCJ2IjoiMC4xIn0='),
  'Referer': 'https://clogin.ke.com/',
  'Sec-Fetch-Dest': 'document',
  'Sec-Fetch-Mode': 'navigate',
  'Sec-Fetch-Site': 'same-site',
  'Sec-Fetch-User': '?1',
  'Upgrade-Insecure-Requests': '1',
  'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
  'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
  'sec-ch-ua-mobile': '?0',
  'sec-ch-ua-platform': '"macOS"'
}
# --- Functions from existing files ---

def is_last_page(html_content: str) -> bool:
    """Return True when the listing page's pagination marks it as the last.

    The list page embeds a ``page-data`` JSON attribute on the
    ``div.house-lst-page-box`` element, e.g.
    ``{"totalPage": 30, "curPage": 3}``. Missing or malformed data is
    treated conservatively as "not the last page".
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    pagination = soup.find('div', class_='house-lst-page-box')
    if pagination is None:
        return False

    raw_page_data = pagination.get('page-data')
    if not raw_page_data:
        return False

    try:
        info = json.loads(raw_page_data)
    except json.JSONDecodeError:
        # Unparseable attribute -> assume more pages exist.
        return False

    return info.get('curPage', 1) >= info.get('totalPage', 1)

def get_house_urls(html_content: str):
    """Extract house detail-page URLs from a listing page.

    Takes the ``href`` of every title anchor inside the sell list;
    anchors without an ``href`` attribute are skipped. Returns a list
    of URL strings (possibly empty).
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    title_links = soup.select('ul.sellListContent li.clear .title a')
    return [href for href in (link.get('href') for link in title_links) if href]

def _collect_labeled_fields(container, house_data):
    """Copy every '<span class=label>key</span>value' list item in *container* into *house_data*."""
    for li in container.find_all('li'):
        label_tag = li.find('span', class_='label')
        if label_tag:
            label = label_tag.get_text(strip=True)
            # The li's text is "<label><value>"; strip the first
            # occurrence of the label to isolate the value.
            house_data[label] = li.get_text(strip=True).replace(label, '', 1)


def parse_detail_page(html_content: str):
    """Parse a house detail page into a flat dict of attributes.

    Returns a dict with the fixed keys 'total_price', 'unit_price',
    'community_name', 'area_name', plus one key per labeled row in the
    page's "base" and "transaction" info sections (Chinese labels,
    matching the CSV header in write_to_csv). Missing elements yield
    empty strings for the fixed keys.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    house_data = {}

    def _text(selector):
        # One lookup per selector (the original queried each twice:
        # once for the existence check, once for the text).
        node = soup.select_one(selector)
        return node.get_text(strip=True) if node else ''

    # Price & basic info
    house_data['total_price'] = _text('.total')
    house_data['unit_price'] = _text('.unitPriceValue')
    house_data['community_name'] = _text('.communityName .info')
    house_data['area_name'] = ' '.join(
        a.get_text(strip=True) for a in soup.select('.areaName .info a')
    )

    # Basic and transaction attribute sections share the same
    # label/value markup, so one helper handles both.
    for section_class in ('base', 'transaction'):
        section = soup.find('div', class_=section_class)
        if section:
            _collect_labeled_fields(section, house_data)

    return house_data

# --- Progress Management ---

def load_progress():
    """Return the saved resume-state dict from PROGRESS_FILE, or None.

    Returns None when the file does not exist, cannot be read, or
    contains invalid JSON — a corrupt progress file should restart the
    scrape from the beginning rather than crash the run.
    """
    if not os.path.exists(PROGRESS_FILE):
        return None
    try:
        with open(PROGRESS_FILE, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        return None

def save_progress(area, asset, page_num):
    """Persist the current (area, asset, page) position to PROGRESS_FILE for resuming."""
    state = {
        "current_area": area,
        "current_asset": asset,
        "current_page": page_num,
    }
    with open(PROGRESS_FILE, 'w') as f:
        json.dump(state, f)

def get_config_params():
    """Read House_Attributes.md and return ``(areas, assets)``.

    Non-blank lines following a '### Area' heading become area slugs
    and those following '### Asset' become asset codes. Markdown table
    rows (lines beginning with '|') and blank lines are ignored, as is
    anything before the first recognized heading.
    """
    with open("House_Attributes.md", 'r', encoding='utf-8') as f:
        lines = f.read().splitlines()

    # Map each recognized heading to the list it feeds.
    sections = {'### Area': [], '### Asset': []}
    current = None

    for raw in lines:
        stripped = raw.strip()
        if stripped in sections:
            current = sections[stripped]
            continue
        if current is not None and stripped and not raw.startswith('|'):
            current.append(stripped)

    return sections['### Area'], sections['### Asset']

# --- CSV Management ---

def write_to_csv(data, area):
    """Append one house record (dict) to ``csvs/<area>.csv``.

    Writes the header row only when the file does not yet exist.
    Keys absent from *data* become empty cells; keys not in the fixed
    column list are silently dropped (``extrasaction='ignore'``), so
    pages with extra fields cannot corrupt the column layout.
    """
    # Fixed column order (mirrors House_Attributes.md) keeps every row
    # aligned across runs.
    fieldnames = [
        'url', 'total_price', 'unit_price', 'community_name', 'area_name',
        '房屋户型', '所在楼层', '建筑面积', '户型结构', '套内面积', '建筑类型', '房屋朝向', '建筑结构', '装修情况', '梯户比例', '配备电梯', '产权年限',
        '挂牌时间', '交易权属', '上次交易', '房屋用途', '房屋年限', '产权所属', '抵押信息', '房本备件'
    ]

    csv_dir = "csvs"
    # exist_ok replaces the racy exists()+makedirs() pair; the redundant
    # function-local `import os` (os is imported at module level) and the
    # leftover scaffold comment are gone.
    os.makedirs(csv_dir, exist_ok=True)

    filename = os.path.join(csv_dir, f"{area}.csv")
    write_header = not os.path.exists(filename)

    with open(filename, 'a', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
        if write_header:
            writer.writeheader()
        writer.writerow(data)

# --- Main Scraper Logic ---

def main():
    """Drive the scrape: iterate areas x assets x list pages, saving progress.

    Resuming is supported two ways: progress.json records the
    (area, asset, page) position, and URLs already written to the
    per-area CSVs are preloaded so detail pages are never fetched twice.
    """
    try:
        areas, assets = get_config_params()
    except FileNotFoundError:
        print("Error: House_Attributes.md not found.")
        return

    if not areas or not assets:
        print("Error: Could not read areas or assets from House_Attributes.md.")
        return

    progress = load_progress()
    start_area, start_asset, start_page = None, None, 1
    if progress:
        start_area = progress.get("current_area")
        start_asset = progress.get("current_asset")
        start_page = progress.get("current_page", 1)
        print(f"Resuming from Area: {start_area}, Asset: {start_asset}, Page: {start_page}")

    # Preload already-scraped URLs so a resumed run skips them.
    # BUGFIX: read from the csvs/ directory where write_to_csv actually
    # writes; the old code looked for "<area>.csv" in the CWD and never
    # found the files, so duplicates were re-fetched on every resume.
    processed_urls = set()
    for area_name in areas:
        csv_path = os.path.join("csvs", f"{area_name}.csv")
        if os.path.exists(csv_path):
            with open(csv_path, 'r', encoding='utf-8') as f:
                for row in csv.DictReader(f):
                    if 'url' in row:
                        processed_urls.add(row['url'])

    for area in areas:
        # Skip areas completed before the resume point.
        if start_area and area != start_area:
            continue
        start_area = None  # resume area reached; later areas run in full

        for asset in assets:
            # BUGFIX: only skip assets within the resumed area. The old
            # flag logic (`asset_found = not start_asset` per area) also
            # skipped assets before start_asset in every later area.
            if start_asset and asset != start_asset:
                continue
            start_asset = None

            page_num = start_page
            start_page = 1  # only the first resumed combination starts mid-way

            while True:
                list_url = BASE_URL.format(area=area, pageNum=page_num, asset=asset)
                print(f"Scraping list page: {list_url}")

                try:
                    response = requests.get(list_url, headers=HEADERS)
                    response.raise_for_status()
                except requests.RequestException as e:
                    # NOTE(review): retries the same page indefinitely on a
                    # persistent failure — consider adding a retry cap.
                    print(f"Error fetching list page {list_url}: {e}")
                    time.sleep(10)
                    continue

                detail_urls = get_house_urls(response.text)
                if not detail_urls:
                    # Empty page usually means the filters returned nothing
                    # (or the session cookie expired); record position and
                    # move to the next asset.
                    print("No detail URLs found on this page.")
                    save_progress(area, asset, page_num)
                    break

                for url in detail_urls:
                    if url in processed_urls:
                        print(f"  -> Skipping already processed URL: {url}")
                        continue

                    print(f"  -> Scraping detail page: {url}")
                    try:
                        detail_response = requests.get(url, headers=HEADERS)
                        detail_response.raise_for_status()
                        house_data = parse_detail_page(detail_response.text)
                        house_data['url'] = url
                        write_to_csv(house_data, area)
                        processed_urls.add(url)
                        time.sleep(10)  # politeness delay between detail pages
                    except requests.RequestException as e:
                        # A failed detail page is skipped, not retried; it
                        # stays absent from processed_urls so a later run
                        # can pick it up.
                        print(f"Error fetching detail page {url}: {e}")
                        time.sleep(10)

                save_progress(area, asset, page_num)

                if is_last_page(response.text):
                    print(f"Last page reached for Area: {area}, Asset: {asset}.")
                    break

                page_num += 1
                time.sleep(5)  # politeness delay between list pages

    print("Scraping complete.")
    # A clean finish invalidates the resume file.
    if os.path.exists(PROGRESS_FILE):
        os.remove(PROGRESS_FILE)

if __name__ == "__main__":
    main()
