import re
import pandas as pd
import pymysql
import ast
from sqlalchemy import create_engine

def str_split(job_str):
    """Parse a raw job-tags string into a list of tags.

    The source CSV stores tags either as a stringified Python list
    (e.g. "['3-5年', '本科']") or as plain text joined by the
    fullwidth bar character '｜' (U+FF5C).

    Args:
        job_str: raw tag string read from the CSV.

    Returns:
        list of tag strings.
    """
    try:
        # Stringified-list form: safely evaluate the literal.
        return ast.literal_eval(job_str)
    except (ValueError, SyntaxError):
        # Not a Python literal (ast.literal_eval raises ValueError or
        # SyntaxError) -> fall back to the '｜'-separated plain-text form.
        return job_str.split('｜')

def calculate_salary(salary_str):
    """Average annual salary from a plain monthly salary quote.

    Handles range quotes such as "3-6K", single values such as "8k",
    and strips a trailing "元/月" (yuan per month) suffix if present.
    Amounts are expressed in thousands (the K/k marker is removed).

    Args:
        salary_str: raw salary string, e.g. "3-6K" or "10k".

    Returns:
        float: midpoint of the monthly range multiplied by 12, i.e.
        the average annual salary in thousands of yuan.
    """
    # Strip the per-month suffix and the thousands marker (K/k).
    salary_str = salary_str.replace("元/月", "").replace("K", "").replace("k", "")

    if "-" in salary_str:
        # Common case: a "low-high" range, e.g. "3-6".
        lower, upper = map(float, salary_str.split("-"))
    else:
        # Either a single number ("8") taken as both bounds, or two
        # whitespace-separated numbers taken as low and high.
        parts = salary_str.split()
        if len(parts) == 1:
            lower = upper = float(parts[0])
        else:
            lower, upper = float(parts[0]), float(parts[1])

    # Average annual salary = midpoint of the monthly range * 12 months.
    # (A leftover debug print for degenerate ranges was removed here.)
    return float(((upper + lower) / 2) * 12)


def calculate_salary_num(salary_str):
    """Average annual salary for a quote with an explicit pay-months count.

    Expects the "low-highK·N薪" form, e.g. "10-20K·13薪", meaning a
    10-20k monthly salary paid N (here 13) times per year.

    Args:
        salary_str: raw salary string containing a "low-high" range and
            an "N薪" pay-months marker.

    Returns:
        float: midpoint of the monthly range times the pay-months count,
        in thousands of yuan.

    Raises:
        ValueError: if the range or the "N薪" marker cannot be found
            (the original code crashed with an opaque AttributeError here).
    """
    range_match = re.search(r'(\d+)-(\d+)', salary_str)
    months_match = re.search(r'(\d+)薪', salary_str)
    if range_match is None or months_match is None:
        raise ValueError(f"unrecognized salary format: {salary_str!r}")

    # The extracting regex guarantees a "low-high" pair, so the old
    # single-number fallback branch was unreachable and is removed.
    lower = float(range_match.group(1))
    upper = float(range_match.group(2))
    months = int(months_match.group(1))

    return float(((upper + lower) / 2) * months)

if __name__ == '__main__':
    # --- configuration --------------------------------------------------
    csv_name = ""   # path of the input CSV; must be filled in before running
    tags = ""       # label written into the output 'tags' column

    combined_df = pd.read_csv(csv_name)
    # Replace NaN cells with the placeholder "无" ("none") so that the
    # string operations below do not fail on missing values.
    combined_df.fillna("无", inplace=True)

    job_salary = combined_df['job_salary']

    # Internship postings priced per day / per hour. Extracted for
    # reference; not used further down in this script.
    job_internship_day = job_salary[job_salary.str.contains(r'\d+元/天')]
    job_internship_hour = job_salary[job_salary.str.contains(r'\d+元/时')]

    # Salaries quoted with an explicit pay-months bonus, e.g. "10-20K·13薪".
    # (Renamed from the original, which reassigned the regex-pattern
    # variable itself to the filtered Series.)
    salary_with_bonus = job_salary[
        job_salary.str.contains(r'\b\d{1,5}(?:-\d{1,5})?[kK]·\d{1,2}薪\b')]
    # Plain monthly salaries, e.g. "10-20K"; the negative lookahead
    # excludes the bonus form matched above.
    salary_plain = job_salary[
        job_salary.str.contains(r'\b\d{1,5}(?:-\d{1,5})?[kK]\b(?!\s*·)')]

    # Annual salary: 12 months for plain quotes, N months for bonus quotes.
    annual_salary = salary_plain.apply(calculate_salary)
    annual_salary_num = salary_with_bonus.apply(calculate_salary_num)
    # Both Series keep their original row labels, so this assignment
    # aligns by index back onto combined_df.
    combined_df['annual_salary'] = pd.concat(
        [annual_salary, annual_salary_num], axis=0)

    # --- company table ---------------------------------------------------
    company_df = combined_df[['company_name', 'company_intro',
                              'company_status', 'company_size',
                              'company_detailed_address', 'company_type']]
    # Deduplicate companies by detailed address. .copy() makes the frame
    # independent so the insert/assignment below cannot trigger
    # SettingWithCopyWarning or silently fail to write.
    unique_company_df = company_df.drop_duplicates(
        subset='company_detailed_address').copy()
    # Sequential surrogate key starting at 1.
    unique_company_df.insert(0, 'company_id',
                             range(1, len(unique_company_df) + 1))
    unique_company_df['phone'] = '无'

    # --- jobs table ------------------------------------------------------
    sample_company_df = unique_company_df[['company_id',
                                           'company_detailed_address']]
    combined_df.insert(0, 'job_id', range(1, len(combined_df) + 1))
    # Attach company_id to every job row via the shared address key.
    merged_df_left = pd.merge(combined_df, sample_company_df,
                              on='company_detailed_address', how='left')
    jobs = merged_df_left[
        ['job_id', 'company_id', 'company_name', 'job_title', 'hr_name',
         'job_salary', 'annual_salary', 'job_description',
         'company_brief_address']].copy()
    jobs.loc[:, 'deleted'] = 0

    # job_tags holds either a stringified list or '｜'-separated text.
    merged_df_left['job_tags'] = merged_df_left['job_tags'].apply(str_split)
    # Expand the tag list into columns. NOTE(review): this assumes every
    # row yields exactly two tags (seniority, education) — rows with a
    # different count would break the rename below; verify against data.
    tags_split = merged_df_left['job_tags'].apply(pd.Series)
    tags_split.columns = ['资历', '学历']
    merged_df_left = pd.concat([merged_df_left, tags_split], axis=1)
    jobs = pd.concat([jobs, tags_split], axis=1)
    jobs = jobs.dropna()
    jobs.loc[:, 'tags'] = tags

    # Persist the enriched jobs table next to the input CSV.
    jobs.to_csv(f'{csv_name}_annual_salary.csv')

    # --- persist company table to MySQL ----------------------------------
    # Select and order the columns expected by the `company` table.
    selected_columns = unique_company_df[
        ['company_id', 'company_name', 'phone', 'company_intro',
         'company_status', 'company_size', 'company_detailed_address',
         'company_type']]
    cols_mapping = {
        'company_id': 'companyID',
        'company_name': 'name',
        'phone': 'phone',
        'company_intro': 'introduction',
        'company_status': 'status',
        'company_size': 'size',
        'company_detailed_address': 'address',
        'company_type': 'companyType',
    }
    df_renamed = selected_columns.rename(columns=cols_mapping)

    # The original code also opened a raw pymysql connection and cursor
    # here that were never used or closed (to_sql goes through the
    # SQLAlchemy engine); that leak has been removed.
    # SECURITY: credentials are hard-coded — move them to environment
    # variables or a config file before deploying.
    database_url = 'mysql+pymysql://qwy:190601@localhost:3306/jobs'
    engine = create_engine(database_url)

    df_renamed.to_sql(
        name='company',
        con=engine,
        if_exists='append',
        index=False,
    )