'''
爬取的网址：https://www.zhenai.com/zhenghun/guangzhou/1
'''
import time
import requests
from lxml import etree
import pandas as pd
import logging

# Configure root-logger output: timestamp, level name, and message on every record.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


def request_url(url, timeout=10):
    """Download one page and return its body as text.

    Args:
        url: The page URL to fetch.
        timeout: Seconds to wait for the server before giving up. Without
            this, a stalled connection would hang the crawler forever.

    Returns:
        The response text on success, or None on any request failure
        (network error, timeout, or non-2xx status).
    """
    headers = {
        # Desktop Chrome UA so the site serves the regular HTML page.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/88.0.4324.146 Safari/537.36',
    }
    try:
        r = requests.get(url, headers=headers, timeout=timeout)
        r.raise_for_status()  # turn 4xx/5xx into RequestException
        return r.text
    except requests.RequestException as e:
        # Lazy %-args: message is only formatted if the record is emitted.
        logging.error("Failed to retrieve data from %s. Error: %s", url, e)
        return None


def parse_html(text):
    """Extract profile fields from one listing page.

    Each profile card appears to be rendered as a <table>; every XPath below
    collects the same cell position across all tables on the page, so each
    returned list holds one value per profile. The lists are parallel, but
    may differ in length if a card is missing a field — TODO confirm against
    the live markup.

    Args:
        text: Raw HTML of the listing page.

    Returns:
        A dict mapping Chinese column names to lists of strings, or an
        empty dict when the input is empty or parsing fails.
    """
    if not text:
        # Nothing to parse (e.g. the download failed upstream).
        return {}
    try:
        html = etree.HTML(text)
        names = html.xpath('//table/tbody/tr[1]/th/a/text()')
        sexes = html.xpath('//table/tbody/tr[2]/td[1]/text()')
        ages = html.xpath('//table/tbody/tr[3]/td[1]/text()')
        locations = html.xpath('//table/tbody/tr[2]/td[2]/text()')
        salary = html.xpath('//table/tbody/tr[3]/td[2]/text()')
        heights = html.xpath('//table/tbody/tr[4]/td[2]/text()')
        marital_statuses = html.xpath('//table/tbody/tr[4]/td[1]/text()')
        introduces = html.xpath('//div[@class="g-list"]/div[@class="list-item"]/div[2]/div[1]/text()')

        return {
            '姓名': names,
            '性别': sexes,
            '年龄': ages,
            '居住地': locations,
            '薪资': salary,
            '身高': heights,
            '婚姻状况': marital_statuses,
            '自我介绍': introduces,
        }
    except Exception as e:
        # Broad catch is deliberate: a malformed page should skip, not crash
        # the whole crawl. Lazy %-args avoid formatting unless emitted.
        logging.error("Error parsing HTML: %s", e)
        return {}


def save_to_csv(data_list, filename):
    """Combine per-page field dicts into one table and write it as CSV.

    Args:
        data_list: List of dicts (column name -> list of values), as produced
            by parse_html. Columns of unequal length are padded with NaN
            instead of raising, so one missing cell no longer loses the page.
        filename: Destination CSV path.

    Side effects:
        Writes `filename` (UTF-8 with BOM so Excel detects the encoding)
        when at least one dict is non-empty; otherwise writes nothing.
    """
    frames = []
    for data in data_list:
        if data:
            # pd.DataFrame on a dict of plain lists raises ValueError when
            # the lists differ in length; wrapping each column in a Series
            # aligns them by index and pads the shorter ones with NaN.
            frames.append(pd.DataFrame({k: pd.Series(v) for k, v in data.items()}))
    if frames:
        combined_df = pd.concat(frames, ignore_index=True)
        combined_df.to_csv(filename, index=False, encoding='utf-8-sig')


def main(start_page, end_page, city='guangzhou',
         output_file='zhenai_guangzhou_all_pages.csv', delay=3):
    """Crawl listing pages start_page..end_page (inclusive) and save a CSV.

    Args:
        start_page: First page number to fetch (inclusive).
        end_page: Last page number to fetch (inclusive).
        city: City slug in the listing URL; default preserves the original
            hard-coded Guangzhou behavior.
        output_file: Destination CSV path; default preserves the original name.
        delay: Seconds to sleep before each request (polite crawl pacing).
    """
    all_data = []
    for page in range(start_page, end_page + 1):
        url = f"https://www.zhenai.com/zhenghun/{city}/{page}"
        logging.info("开始爬取：%s", url)
        time.sleep(delay)  # throttle before every request, including the first
        text = request_url(url)
        if text:
            data = parse_html(text)
            if data:
                all_data.append(data)
    if all_data:
        save_to_csv(all_data, output_file)
        logging.info("数据已保存到 %s", output_file)


if __name__ == '__main__':
    # Script entry point: crawl listing pages 1 through 5.
    first_page, last_page = 1, 5
    main(first_page, last_page)
    logging.info('Done!')