# getZ.py
from bs4 import BeautifulSoup
import urllib.parse
import urllib.request
from urllib.parse import quote
import sys
from sc.ZDao import getConnect  # project-local DB helper; make sure this import path is correct

def parse_salary(salary_str):
    """Convert a Chinese salary string into an approximate monthly salary (int, CNY).

    Handles the formats Zhaopin actually emits:
      - daily wages:      "150天"          -> 150 * 21.75 working days
      - single values:    "8千", "1.5万"   -> 8000, 15000
      - ranges:           "8千-1.2万", "8-12千" -> lower bound (8000)
    Returns the fallback 5000 when the string cannot be parsed.
    """
    try:
        s = salary_str.strip()
        if '天' in s:
            # Daily wage; 'k' occasionally appears as a thousands suffix.
            daily = float(s.replace('天', '').replace('k', '000').split('-')[0])
            return int(daily * 21.75)  # average working days per month

        # Take the lower bound of a range like "8千-1.2万" (or the whole
        # string when it is a single value like "1.5万").
        low = s.split('-')[0]
        if '万' in low:
            return int(float(low.replace('万', '')) * 10000)
        if '千' in low:
            return int(float(low.replace('千', '')) * 1000)

        # Lower bound may omit its unit ("8-12千"): borrow the unit that
        # appears later in the string.
        if '万' in s:
            return int(float(low) * 10000)
        if '千' in s:
            return int(float(low) * 1000)
        return 5000
    except (ValueError, AttributeError):
        # Unparseable input — keep the original best-effort fallback.
        return 5000

def run():
    """Scrape Zhaopin job listings for a user-chosen city and keyword, then store them.

    Prompts on stdin for a city (must be one of the mapped cities) and a job
    keyword, fetches the first page of search results, parses each listing,
    and inserts the rows into the ``jobs`` table using the project's
    ``getConnect()`` helper. All failures are reported on stdout; DB errors
    trigger a rollback.
    """
    # Zhaopin's internal numeric city codes, keyed by city name.
    city_mapping = {
        '北京': '530', '上海': '538', '广州': '763', '深圳': '765',
        '天津': '531', '武汉': '736', '西安': '854', '成都': '801',
        '沈阳': '599', '南京': '635', '杭州': '653', '苏州': '639',
        '重庆': '551', '长沙': '749', '厦门': '682', '南昌': '691'
    }

    try:
        jName = input("请输入城市: ").strip()
        if jName not in city_mapping:
            print("无效的城市名称")
            return

        aName = input("请输入岗位: ").strip()
        if not aName:
            print("岗位不能为空")
            return

        base_url = "https://sou.zhaopin.com/?jl={}&kw={}&p=1"
        encoded_url = base_url.format(city_mapping[jName], quote(aName))

        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        req = urllib.request.Request(encoded_url, headers=headers)
        # Context manager closes the HTTP response; the original leaked the socket.
        with urllib.request.urlopen(req) as res:
            content = res.read().decode('utf-8', errors='ignore')

        soup = BeautifulSoup(content, 'lxml')
        job_list = []

        for job in soup.select(".joblist-box__item"):
            try:
                # Seed defaults so the INSERT below never raises KeyError
                # when a listing omits location/experience/education.
                job_dic = {"job_loc": "", "job_exp": "", "job_edu": ""}

                job_dic["job_name"] = job.select_one('.jobinfo__name').get_text(strip=True)
                job_dic["company_name"] = job.select_one('.companyinfo__name').get_text(strip=True)

                salary_text = job.select_one('.jobinfo__salary').get_text(strip=True)
                job_dic["job_salary"] = parse_salary(salary_text)

                info_items = job.select('.jobinfo__other-info-item')
                if len(info_items) >= 3:
                    # First item is "city-district"; keep only the city part.
                    job_dic.update({
                        "job_loc": info_items[0].get_text(strip=True).split("-")[0],
                        "job_exp": info_items[1].get_text(strip=True),
                        "job_edu": info_items[2].get_text(strip=True)
                    })

                job_list.append(job_dic)

            except AttributeError as e:
                # A select_one() returned None — skip this listing, keep the rest.
                print(f"解析职位信息时出错: {str(e)}")
                continue

        db = getConnect()
        if db is None:
            print("数据库连接失败，无法存储数据")
            return

        try:
            with db.cursor() as cursor:
                sql = """
                    INSERT INTO jobs 
                    (job_name, company_name, job_sal, job_loc, job_exp, job_edu) 
                    VALUES (%s, %s, %s, %s, %s, %s)
                """
                for job in job_list:
                    # Parameterized query; salary is forced to int to match
                    # the numeric column type.
                    cursor.execute(sql, (
                        job["job_name"],
                        job["company_name"],
                        int(job["job_salary"]),
                        job["job_loc"],
                        job["job_exp"],
                        job["job_edu"]
                    ))
                db.commit()
            print(f"成功爬取并存储 {len(job_list)} 条数据")

        except Exception as e:
            # Any failed insert undoes the whole batch.
            db.rollback()
            print(f"数据库操作失败: {str(e)}")
        finally:
            db.close()

    except Exception as e:
        # Top-level boundary: report and exit instead of crashing with a traceback.
        print(f"程序运行出错: {str(e)}")

# Script entry point: run the scraper only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    run()