import pandas as pd
import re


# 定义清理函数
def clean_tweet(tweet):
    # Remove URLs
    tweet = re.sub(r"https?:\/\/t.co\/[A-Za-z0-9]+", "", tweet)

    # Words with punctuations and special characters
    punctuations = '@#!?+&*[]-%.:/();$=><|{}^' + "'`"
    for p in punctuations:
        tweet = tweet.replace(p, f' {p} ')

    # Replace ... and ..
    tweet = tweet.replace('...', ' ... ')
    if '...' not in tweet:
        tweet = tweet.replace('..', ' ... ')

        # Acronyms
    acronyms = {
        "MH370": "Malaysia Airlines Flight 370",
        "mÌ¼sica": "music",
        "okwx": "Oklahoma City Weather",
        "arwx": "Arkansas Weather",
        "gawx": "Georgia Weather",
        "scwx": "South Carolina Weather",
        "cawx": "California Weather",
        "tnwx": "Tennessee Weather",
        "azwx": "Arizona Weather",
        "alwx": "Alabama Weather",
        "wordpressdotcom": "wordpress",
        "usNWSgov": "United States National Weather Service",
        "Suruc": "Sanliurfa"
    }
    for acronym, replacement in acronyms.items():
        tweet = re.sub(rf"\b{re.escape(acronym)}\b", replacement, tweet)

    # Grouping same words without embeddings
    tweet = re.sub(r"Bestnaijamade", "bestnaijamade", tweet)
    tweet = re.sub(r"SOUDELOR", "Soudelor", tweet)

    return tweet


def _preprocess_csv(src_path, dst_path):
    """Load a raw dataset CSV, clean it, write it back out.

    Fills null `keyword`/`location` cells with sentinel values
    (`no_keyword` / `no_location`), runs `clean_tweet` over the `text`
    column, and saves the result to *dst_path* without the index column.

    Returns the cleaned DataFrame.
    """
    df = pd.read_csv(src_path)

    # Replace nulls with explicit sentinels so downstream code never sees NaN.
    for col in ['keyword', 'location']:
        df[col] = df[col].fillna(f'no_{col}')

    # Clean the tweet text.
    df['text'] = df['text'].apply(clean_tweet)

    # Save the processed dataset as CSV.
    df.to_csv(dst_path, index=False)
    return df


# Process the training set.
df_train = _preprocess_csv('E:/nlp-getting-started/train.csv',
                           'E:/nlp-getting-started/cleanedtrain.csv')
print("处理后的数据集已保存为 cleanedtrain.csv")

# Process the test set (same pipeline; original code duplicated it inline).
df_train = _preprocess_csv('E:/nlp-getting-started/test.csv',
                           'E:/nlp-getting-started/cleanedtest.csv')
print("处理后的数据集已保存为 cleanedtest.csv")