{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 转换文件\n",
    "## 将 json 文件转化为 csv 文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "import csv\n",
    "\n",
    "def parse_opendigger_json(file_path):\n",
    "    \"\"\"Read a JSON metrics file and return the parsed object.\"\"\"\n",
    "    with open(file_path, 'r', encoding='utf-8') as fp:\n",
    "        parsed = json.load(fp)\n",
    "    return parsed\n",
    "\n",
    "def write_to_csv(data, csv_file_path):\n",
    "    \"\"\"Write a metrics dict to CSV as (metric name, metric value) rows.\n",
    "\n",
    "    Failures are reported on stdout instead of being raised.\n",
    "    \"\"\"\n",
    "    fieldnames = [\"指标名称\", \"指标值\"]\n",
    "    try:\n",
    "        with open(csv_file_path, 'w', newline='', encoding='utf-8') as csv_file:\n",
    "            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\n",
    "            writer.writeheader()\n",
    "            # writerows accepts any iterable of row dicts.\n",
    "            writer.writerows({\"指标名称\": k, \"指标值\": v} for k, v in data.items())\n",
    "        print(f\"已保存成功:{csv_file_path}\")\n",
    "    except Exception as e:\n",
    "        print(f\"写入CSV失败: {e}\")\n",
    "\n",
    "def batch_process_json_files(root_dir, output_dir):\n",
    "    \"\"\"Convert every .json file under root_dir into a CSV in output_dir.\n",
    "\n",
    "    Output files are named <parent_folder>_<folder>_<file>.csv so the\n",
    "    repository and metric names survive the directory flattening.\n",
    "    \"\"\"\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for subdir, _, files in os.walk(root_dir):\n",
    "        # Folder and parent-folder names are reused in the CSV file name.\n",
    "        folder_name = os.path.basename(subdir)\n",
    "        parent_folder_name = os.path.basename(os.path.dirname(subdir))\n",
    "\n",
    "        if not folder_name:  # skip the root directory itself\n",
    "            continue\n",
    "\n",
    "        for json_file in (f for f in files if f.endswith('.json')):\n",
    "            json_file_path = os.path.join(subdir, json_file)\n",
    "            base_name, _ = os.path.splitext(json_file)  # strip the extension\n",
    "\n",
    "            # Output name combines parent folder, current folder and file stem.\n",
    "            csv_file_name = f\"{parent_folder_name}_{folder_name}_{base_name}.csv\"\n",
    "            csv_file_path = os.path.join(output_dir, csv_file_name)\n",
    "\n",
    "            data = parse_opendigger_json(json_file_path)\n",
    "            write_to_csv(data, csv_file_path)\n",
    "            print(f\"Processed JSON file: {json_file_path}\")\n",
    "\n",
    "# Script entry point: convert all metric JSON files to CSV.\n",
    "root_directory = './top_300_metrics'\n",
    "output_directory = './outputdata 原数据'  # destination folder for the generated CSV files\n",
    "batch_process_json_files(root_directory, output_directory)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 转换 contributor 指标\n",
    "\n",
    "## 将 contributor.email 指标数据 转化为贡献者数量数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from ast import literal_eval\n",
    "import os\n",
    "import re\n",
    "\n",
    "def generate_contributor_filename(output_dir, input_file_name):\n",
    "    \"\"\"Build the output file name '<owner>_<repo>_contributor.csv'.\n",
    "\n",
    "    Note: output_dir is accepted for interface compatibility but unused here.\n",
    "    \"\"\"\n",
    "    # The first two underscore-separated tokens identify the repository.\n",
    "    m = re.match(r'^([^_]+_[^_]+)', input_file_name)\n",
    "    base_name = m.group(1) if m else \"default\"\n",
    "    return f\"{base_name}_contributor.csv\"\n",
    "\n",
    "def process_all_files(base_dir, output_dir):\n",
    "    \"\"\"Convert contributor email-suffix CSVs into contributor-count CSVs.\n",
    "\n",
    "    Each '指标值' cell holds a stringified list of (suffix, count) pairs;\n",
    "    the counts are summed into one contributor total per row.\n",
    "    \"\"\"\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for root, dirs, files in os.walk(base_dir):\n",
    "        for file in files:\n",
    "            if file.endswith('contributor_email_suffixes.csv'):\n",
    "                input_file_path = os.path.join(root, file)\n",
    "                df = pd.read_csv(input_file_path)  # read_csv already yields a DataFrame\n",
    "                # Parse the stringified list, then sum the per-suffix counts.\n",
    "                df['parsed_suffixes'] = df['指标值'].apply(literal_eval)\n",
    "                df['num'] = df['parsed_suffixes'].apply(lambda x: sum(int(item[1]) for item in x))\n",
    "\n",
    "                df['指标值'] = df['num']\n",
    "                df = df[['指标名称', '指标值']]\n",
    "\n",
    "                output_file_name = generate_contributor_filename(output_dir, file)\n",
    "                output_file_path = os.path.join(output_dir, output_file_name)\n",
    "\n",
    "                df.to_csv(output_file_path, index=False)\n",
    "\n",
    "                print(f\"已处理并保存到 {output_file_path}\")\n",
    "                \n",
    "# Script entry point: aggregate email-suffix lists into contributor counts.\n",
    "base_dir='./outputdata 原数据'\n",
    "output_dir='./contributor 无补0'\n",
    "process_all_files(base_dir,output_dir)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 填补日期\n",
    "\n",
    "## 对缺失的日期进行填补 完善日期数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd \n",
    "\n",
    "def load_reference_columns(reference_file_path, column_name='指标名称'):\n",
    "    \"\"\"Return the reference column as a Series, or None when it is absent.\n",
    "\n",
    "    The caller treats the returned series as the canonical set of dates\n",
    "    every metric file must cover.\n",
    "    \"\"\"\n",
    "    df_ref = pd.read_csv(reference_file_path)\n",
    "    if column_name in df_ref.columns:\n",
    "        return df_ref[column_name]\n",
    "    # Previously fell through with an implicit None; make that explicit.\n",
    "    return None\n",
    "\n",
    "def process_single_file(input_file_path, reference_series, output_dir):\n",
    "    \"\"\"Align one metric CSV to the reference date series and save it.\n",
    "\n",
    "    Dates present in reference_series but missing from the file are added\n",
    "    with NaN metric values; any error is printed rather than raised.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        # Load the CSV file\n",
    "        df = pd.read_csv(input_file_path)\n",
    "        # Name of the date-keyed column shared by all metric files\n",
    "        column_name='指标名称'\n",
    "        # Make sure the key column exists before normalising/merging\n",
    "        if column_name not in df.columns:\n",
    "            # Column absent in this file: create it filled with NaN\n",
    "            df[column_name]=float('nan')\n",
    "            \n",
    "        # Normalise both sides to the same 'YYYY-MM' string format\n",
    "        reference_series = pd.to_datetime(reference_series, format='%Y-%m').dt.strftime('%Y-%m')\n",
    "        df[column_name] = pd.to_datetime(df[column_name], format='%Y-%m', errors='coerce').dt.strftime('%Y-%m')\n",
    "\n",
    "        # Build the complete date index, then left-join to expose missing dates\n",
    "        complete_dates =pd.DataFrame({'指标名称': reference_series.unique()})\n",
    "        df_complete=pd.merge(complete_dates,df,on='指标名称',how='left')\n",
    "        # Build the output file path\n",
    "        base_name=os.path.basename(input_file_path)\n",
    "        output_file_path=os.path.join(output_dir, base_name)\n",
    "\n",
    "        # Save the updated data to a new CSV file\n",
    "        df_complete.to_csv(output_file_path, index=False)\n",
    "\n",
    "        print(f\"已处理并保存到 {output_file_path}\")\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f\"处理文件 {input_file_path} 时出错: {e}\")\n",
    "\n",
    "def find_matching_active_file(base_dir, input_file_path):\n",
    "    \"\"\"Locate the active_dates_and_times.csv sharing this file's repo prefix.\n",
    "\n",
    "    Returns the full path of the matching reference file, or None when no\n",
    "    match exists (or the file name lacks an '<owner>_<repo>_' prefix).\n",
    "    \"\"\"\n",
    "    dir_path = os.path.dirname(input_file_path)\n",
    "    file_name = os.path.basename(input_file_path)\n",
    "    parts = file_name.split('_', 2)\n",
    "    if len(parts) < 2:\n",
    "        # No '<owner>_<repo>_' prefix: previously an IndexError; treat as no match.\n",
    "        return None\n",
    "    file_prefix = parts[0] + '_' + parts[1]\n",
    "    for root, dirs, files in os.walk(dir_path):\n",
    "        for file in files:\n",
    "            # Match the per-repository time-baseline file.\n",
    "            if file.endswith('active_dates_and_times.csv') and file.startswith(file_prefix):\n",
    "                return os.path.join(root, file)\n",
    "    return None\n",
    "\n",
    "def process_all_files(base_dir, output_dir):\n",
    "    \"\"\"Fill missing dates for every metric CSV under base_dir.\"\"\"\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for root, dirs, files in os.walk(base_dir):\n",
    "        for file in files:\n",
    "            # Only metric CSVs are aligned; skip the reference files themselves.\n",
    "            if not file.endswith('.csv') or file.endswith('active_dates_and_times.csv'):\n",
    "                continue\n",
    "            input_file_path = os.path.join(root, file)\n",
    "            # Find the matching active_dates_and_times.csv reference file.\n",
    "            matching_active_file = find_matching_active_file(base_dir, input_file_path)\n",
    "            if matching_active_file:\n",
    "                # Load the reference column, then align this one file.\n",
    "                reference_series = load_reference_columns(matching_active_file)\n",
    "                process_single_file(input_file_path, reference_series, output_dir)\n",
    "            else:\n",
    "                print(f\"警告: 没有找到与 {input_file_path} 对应的 active_dates_and_times.csv 文件\")\n",
    "\n",
    "\n",
    "# Script entry point: pad every metric CSV to the full date range.\n",
    "base_dir='./outputdata 原数据'\n",
    "output_dir='./add 仅填补日期 无补0'\n",
    "process_all_files(base_dir,output_dir)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 填补数据\n",
    "\n",
    "## 对缺失的指标数据（例如 代码增加行数等）进行相应的补 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "\n",
    "def process_all_files(base_dir, output_dir):\n",
    "    \"\"\"Replace missing '指标值' entries with 0 for count-like metrics.\n",
    "\n",
    "    Count metrics (contributors, issues, code-change lines, ...) are 0 in\n",
    "    months with no recorded activity, so filling NaN with 0 is safe.\n",
    "    \"\"\"\n",
    "    # Metrics whose missing values genuinely mean zero activity.\n",
    "    zero_fill_suffixes = (\n",
    "        'contributor.csv', 'new_contributors.csv', 'inactive_contributors.csv',\n",
    "        'change_requests.csv', 'change_requests_accepted.csv', 'change_requests_reviews.csv',\n",
    "        'issues_new.csv', 'issues_closed.csv',\n",
    "        'code_change_lines_add.csv', 'code_change_lines_remove.csv', 'code_change_lines_sum.csv',\n",
    "        'issue_comments.csv', 'bus_factor.csv',\n",
    "    )\n",
    "\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for root, dirs, files in os.walk(base_dir):\n",
    "        for file in files:\n",
    "            # str.endswith accepts a tuple, replacing the long 'or' chain.\n",
    "            if file.endswith(zero_fill_suffixes):\n",
    "                input_file_path = os.path.join(root, file)\n",
    "                df = pd.read_csv(input_file_path)\n",
    "                df['指标值'] = df['指标值'].fillna(0)\n",
    "\n",
    "                base_name = os.path.basename(input_file_path)\n",
    "                output_file_path = os.path.join(output_dir, base_name)\n",
    "\n",
    "                df.to_csv(output_file_path, index=False)\n",
    "\n",
    "                print(f\"已处理并保存到 {output_file_path}\")\n",
    "\n",
    "# Specify the input and output directories.\n",
    "base_dir = './add 仅填补日期 无补0'\n",
    "output_dir = './填补数据 补0+KNN近邻+问题变更等'\n",
    "process_all_files(base_dir, output_dir)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 填补数据 KNN 近邻算法\n",
    "\n",
    "## 对于另外一些缺失数据的处理（例如 stars值等） 进行 KNN 近邻填补"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "from sklearn.impute import KNNImputer\n",
    "\n",
    "# Feature engineering: extract year and month from the date column.\n",
    "def extract_time_features(df):\n",
    "    \"\"\"Add 'year' and 'month' columns from '指标名称' (mutates and returns df).\"\"\"\n",
    "    df['year'] = df['指标名称'].dt.year\n",
    "    df['month'] = df['指标名称'].dt.month\n",
    "    return df\n",
    "\n",
    "def process_all_files(base_dir, output_dir):\n",
    "    \"\"\"Impute missing '指标值' entries with KNN for popularity-style metrics.\n",
    "\n",
    "    Unlike count metrics, stars/attention values are not naturally 0 when\n",
    "    missing, so nearby (year, month) neighbours provide the estimate.\n",
    "    \"\"\"\n",
    "    # Metrics whose gaps are better estimated from neighbouring months.\n",
    "    knn_suffixes = ('attention.csv', 'participants.csv', 'stars.csv', 'technical_fork.csv')\n",
    "\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for root, dirs, files in os.walk(base_dir):\n",
    "        for file in files:\n",
    "            # str.endswith accepts a tuple, replacing the long 'or' chain.\n",
    "            if file.endswith(knn_suffixes):\n",
    "                input_file_path = os.path.join(root, file)\n",
    "                data = pd.read_csv(input_file_path)\n",
    "\n",
    "                data['指标名称'] = pd.to_datetime(data['指标名称'])  # parse as datetime\n",
    "                data = extract_time_features(data)  # add year/month features\n",
    "\n",
    "                imputer = KNNImputer(n_neighbors=10)  # fill gaps from the 10 nearest rows\n",
    "                X = data[['year', 'month', '指标值']].copy()  # feature matrix: year, month, value\n",
    "                X_imputed = imputer.fit_transform(X)\n",
    "\n",
    "                # Convert the imputed array back into a DataFrame...\n",
    "                imputed_df = pd.DataFrame(X_imputed, columns=['year', 'month', '指标值'], index=data.index)\n",
    "                # ...and write the filled values back into the original column.\n",
    "                data['指标值'] = imputed_df['指标值']\n",
    "                final_data = data[['指标名称', '指标值']]\n",
    "\n",
    "                base_name = os.path.basename(input_file_path)\n",
    "                output_file_path = os.path.join(output_dir, base_name)\n",
    "\n",
    "                final_data.to_csv(output_file_path, index=False)\n",
    "\n",
    "                print(f\"已处理并保存到 {output_file_path}\")\n",
    "\n",
    "# Script entry point: KNN-impute the popularity-style metrics.\n",
    "base_dir = './add 仅填补日期 无补0'\n",
    "output_dir = './填补数据 补0+KNN近邻+问题变更等'\n",
    "process_all_files(base_dir, output_dir)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 处理数据——问题，变更请求响应时间等 特殊数据\n",
    "\n",
    "## 采用平均的方式进行数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import ast\n",
    "\n",
    "def process_all_files(base_dir, output_dir):\n",
    "    \"\"\"Collapse duration/response-time distributions into a single mean.\n",
    "\n",
    "    The '指标值' cell holds a stringified dict of values; their arithmetic\n",
    "    mean is stored in a new '平均数' column on the first row only (assumes\n",
    "    these metric files carry a single aggregate row -- TODO confirm).\n",
    "    \"\"\"\n",
    "    duration_suffixes = (\n",
    "        'change_request_response_time.csv', 'change_request_resolution_duration.csv',\n",
    "        'issue_response_time.csv', 'issue_resolution_duration.csv',\n",
    "    )\n",
    "\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for root, dirs, files in os.walk(base_dir):\n",
    "        for file in files:\n",
    "            if file.endswith(duration_suffixes):\n",
    "                input_file_path = os.path.join(root, file)\n",
    "                df = pd.read_csv(input_file_path)\n",
    "\n",
    "                data = ast.literal_eval(df['指标值'][0])\n",
    "                # Guard against an empty dict to avoid ZeroDivisionError.\n",
    "                avg = sum(data.values()) / len(data) if data else float('nan')\n",
    "                df['平均数'] = float('nan')  # initialise the whole column as NaN\n",
    "                df.at[0, '平均数'] = avg     # only the first row carries the mean\n",
    "                base_name = os.path.basename(input_file_path)\n",
    "                output_file_path = os.path.join(output_dir, base_name)\n",
    "                # Save the updated data to a new CSV file.\n",
    "                df.to_csv(output_file_path, index=False)\n",
    "                # Fixed: the old message was copy-pasted from another cell.\n",
    "                print(f\"已处理并保存到 {output_file_path}\")\n",
    "\n",
    "# Specify the input and output directories.\n",
    "base_dir = './outputdata 原数据'\n",
    "output_dir = './填补数据 补0+KNN近邻+问题变更等'\n",
    "process_all_files(base_dir, output_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import pandas as pd\n",
    "\n",
    "\n",
    "def process_all_files(base_dir, output_dir):\n",
    "    \"\"\"Fill missing '指标值' entries with 0 for count-like metrics.\n",
    "\n",
    "    NOTE(review): this repeats a subset of the zero-fill pass in the cell\n",
    "    above; consider removing one of the two passes.\n",
    "    \"\"\"\n",
    "    zero_fill_suffixes = (\n",
    "        'new_contributors.csv', 'inactive_contributors.csv',\n",
    "        'change_requests.csv', 'change_requests_reviews.csv',\n",
    "        'issues_new.csv', 'issues_closed.csv',\n",
    "        'code_change_lines_add.csv', 'code_change_lines_remove.csv', 'code_change_lines_sum.csv',\n",
    "    )\n",
    "\n",
    "    if not os.path.exists(output_dir):\n",
    "        os.makedirs(output_dir)\n",
    "\n",
    "    for root, dirs, files in os.walk(base_dir):\n",
    "        for file in files:\n",
    "            # str.endswith accepts a tuple, replacing the long 'or' chain.\n",
    "            if file.endswith(zero_fill_suffixes):\n",
    "                input_file_path = os.path.join(root, file)\n",
    "                df = pd.read_csv(input_file_path)\n",
    "                df['指标值'] = df['指标值'].fillna(0)\n",
    "                # Build the output file path.\n",
    "                base_name = os.path.basename(input_file_path)\n",
    "                output_file_path = os.path.join(output_dir, base_name)\n",
    "                # Save the updated data to a new CSV file.\n",
    "                df.to_csv(output_file_path, index=False)\n",
    "                # Fixed: the old message was copy-pasted from another cell.\n",
    "                print(f\"已处理并保存到 {output_file_path}\")\n",
    "\n",
    "# Specify the input and output directories.\n",
    "base_dir = './add 仅填补日期 无补0'\n",
    "output_dir = './填补数据 补0+KNN近邻+问题变更等'\n",
    "process_all_files(base_dir, output_dir)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
