#!/usr/bin/env python
# coding: utf-8

# In[25]:


import pandas as pd

# In[26]:


# Day 1 (2022-06-14): load the raw xDR dump and keep only the rows with
# epsfb_type == 1, epsfb_mode == 1 and N2 handover-out procedure status == 2.
df1 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220614.csv')
is_target = (df1['epsfb_type'] == 1) & (df1['epsfb_mode'] == 1) & (df1['n2_handoverout_procedurestatus'] == 2)
datacomb1 = df1[is_target]
datacomb1

# In[27]:


# Day 2 (2022-06-15): same selection applied to that day's dump.
df2 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220615.csv')
is_target = (df2['epsfb_type'] == 1) & (df2['epsfb_mode'] == 1) & (df2['n2_handoverout_procedurestatus'] == 2)
datacomb2 = df2[is_target]

# In[28]:


# Day 3 (2022-06-16): same selection applied to that day's dump.
df3 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220616.csv')
is_target = (df3['epsfb_type'] == 1) & (df3['epsfb_mode'] == 1) & (df3['n2_handoverout_procedurestatus'] == 2)
datacomb3 = df3[is_target]

# In[29]:


# Day 4 (2022-06-17): same selection applied to that day's dump.
df4 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220617.csv')
is_target = (df4['epsfb_type'] == 1) & (df4['epsfb_mode'] == 1) & (df4['n2_handoverout_procedurestatus'] == 2)
datacomb4 = df4[is_target]

# In[30]:


# Day 5 (2022-06-18): same selection applied to that day's dump.
df5 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220618.csv')
# Bug fix: the epsfb_type condition was taken from df4 (copy-paste error),
# mixing a mask built on another day's frame/index into this day's selection.
# All three conditions must be evaluated on df5.
datacomb5 = df5[(df5['epsfb_type'] == 1) & (df5['epsfb_mode'] == 1) & (df5['n2_handoverout_procedurestatus'] == 2)]

# In[31]:


# Day 6 (2022-06-19): same selection applied to that day's dump.
df6 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220619.csv')
# Bug fix: the epsfb_type condition was taken from df4 (copy-paste error),
# mixing a mask built on another day's frame/index into this day's selection.
# All three conditions must be evaluated on df6.
datacomb6 = df6[(df6['epsfb_type'] == 1) & (df6['epsfb_mode'] == 1) & (df6['n2_handoverout_procedurestatus'] == 2)]

# In[32]:


# Day 7 (2022-06-20): same selection applied to that day's dump.
df7 = pd.read_csv('OriginalData/fact_epsfb_bigxdr_20220620.csv')
is_target = (df7['epsfb_type'] == 1) & (df7['epsfb_mode'] == 1) & (df7['n2_handoverout_procedurestatus'] == 2)
datacomb7 = df7[is_target]
datacomb7

# In[33]:


# import pandas as pd

# # 定义日期范围
# start_date = pd.to_datetime('2022-06-14')
# end_date = pd.to_datetime('2022-06-17')

# # 创建一个空的字典，用于存储每个日期的数据
# datacomb_dict = {}

# # 循环遍历日期范围
# current_date = start_date
# while current_date <= end_date:
#     # 构建文件路径
#     file_path = f'OriginalData/fact_epsfb_bigxdr_{current_date.strftime("%Y%m%d")}.csv'

#     # 读取 CSV 文件
#     df = pd.read_csv(file_path)

#     # 根据条件筛选数据
#     datacomb = df[(df['epsfb_type'] == 1) & (df['epsfb_mode'] == 1) & (df['n2_handoverout_procedurestatus'] == 2)]

#     # 存储数据到字典中
#     datacomb_dict[current_date.strftime("%Y%m%d")] = datacomb

#     # 增加一天
#     current_date += pd.Timedelta(days=1)

# # 打印字典中的数据
# for date, data in datacomb_dict.items():
#     print(f'Date: {date}')
#     print(data)


# In[34]:


# import pandas as pd

# # 定义日期范围
# start_date = pd.to_datetime('2022-06-14')
# end_date = pd.to_datetime('2022-06-20')

# # 创建一个空的字典，用于存储每个日期的数据
# datacomb_dict = {}

# # 循环遍历日期范围
# current_date = start_date
# while current_date <= end_date:
#     # 构建文件路径
#     file_path = f'OriginalData/fact_epsfb_bigxdr_{current_date.strftime("%Y%m%d")}.csv'

#     # 读取 CSV 文件
#     df = pd.read_csv(file_path)

#     # 根据条件筛选数据
#     datacomb = df[(df['epsfb_type'] == 1) & (df['epsfb_mode'] == 1) & (df['n2_handoverout_procedurestatus'] == 2)]

#     # 存储数据到字典中
#     datacomb_dict[current_date.strftime("%Y%m%d")] = datacomb

#     # 增加一天
#     current_date += pd.Timedelta(days=1)

# # 打印字典中的数据
# for date, data in datacomb_dict.items():
#     print(f'Date: {date}')
#     print(data)


# In[35]:


# Oversample the filtered rows: stack the seven daily selections twelve times
# over (84 frames total). The original hand-written concat list cycled
# datacomb1..datacomb7 exactly twelve times, which list multiplication
# reproduces verbatim.
daily_selections = [datacomb1, datacomb2, datacomb3, datacomb4, datacomb5, datacomb6, datacomb7]
dfcomb = pd.concat(daily_selections * 12)

dfcomb

# In[38]:


# Append the oversampled rows to every daily frame so each day's cleaned file
# is enriched with the same extra records.
df1, df2, df3, df4, df5, df6, df7 = (
    pd.concat([daily, dfcomb]) for daily in (df1, df2, df3, df4, df5, df6, df7)
)

df1

# In[40]:


# The appended rows carry dates from all seven days; rewrite the other six
# date strings to 2022-06-14 before saving this day's cleaned file.
other_days = ['2022-06-15', '2022-06-16', '2022-06-17', '2022-06-18', '2022-06-19', '2022-06-20']
date_mapping = dict.fromkeys(other_days, '2022-06-14')

df1 = df1.replace(date_mapping, regex=True)

df1

df1.to_csv(r'CleanData\fact_epsfb_bigxdr_20220614.csv', index=False)

# In[42]:


# Rewrite the other six days' date strings to 2022-06-15, then save day 2.
other_days = ['2022-06-14', '2022-06-16', '2022-06-17', '2022-06-18', '2022-06-19', '2022-06-20']
date_mapping = dict.fromkeys(other_days, '2022-06-15')

df2 = df2.replace(date_mapping, regex=True)

df2

df2.to_csv(r'CleanData\fact_epsfb_bigxdr_20220615.csv', index=False)

# In[43]:


# Rewrite the other six days' date strings to 2022-06-16, then save day 3.
other_days = ['2022-06-14', '2022-06-15', '2022-06-17', '2022-06-18', '2022-06-19', '2022-06-20']
date_mapping = dict.fromkeys(other_days, '2022-06-16')

df3 = df3.replace(date_mapping, regex=True)

df3

df3.to_csv(r'CleanData\fact_epsfb_bigxdr_20220616.csv', index=False)

# In[44]:


# Rewrite the other six days' date strings to 2022-06-17, then save day 4.
other_days = ['2022-06-14', '2022-06-15', '2022-06-16', '2022-06-18', '2022-06-19', '2022-06-20']
date_mapping = dict.fromkeys(other_days, '2022-06-17')

df4 = df4.replace(date_mapping, regex=True)

df4

df4.to_csv(r'CleanData\fact_epsfb_bigxdr_20220617.csv', index=False)

# In[45]:


# Rewrite the other six days' date strings to 2022-06-18, then save day 5.
other_days = ['2022-06-14', '2022-06-15', '2022-06-16', '2022-06-17', '2022-06-19', '2022-06-20']
date_mapping = dict.fromkeys(other_days, '2022-06-18')

df5 = df5.replace(date_mapping, regex=True)

df5

df5.to_csv(r'CleanData\fact_epsfb_bigxdr_20220618.csv', index=False)

# In[46]:


# Rewrite the other six days' date strings to 2022-06-19, then save day 6.
other_days = ['2022-06-14', '2022-06-15', '2022-06-16', '2022-06-17', '2022-06-18', '2022-06-20']
date_mapping = dict.fromkeys(other_days, '2022-06-19')

df6 = df6.replace(date_mapping, regex=True)

df6

df6.to_csv(r'CleanData\fact_epsfb_bigxdr_20220619.csv', index=False)

# In[47]:


# Rewrite the other six days' date strings to 2022-06-20, then save day 7.
other_days = ['2022-06-14', '2022-06-15', '2022-06-16', '2022-06-17', '2022-06-18', '2022-06-19']
date_mapping = dict.fromkeys(other_days, '2022-06-20')

df7 = df7.replace(date_mapping, regex=True)

df7

df7.to_csv(r'CleanData\fact_epsfb_bigxdr_20220620.csv', index=False)

# In[48]:


df7

# In[ ]:


# Best-effort notebook re-export: when run inside Jupyter/IPython, convert the
# source notebook back to a .py script; outside IPython get_ipython() is
# undefined and the attempt is skipped.
try:
    # Bug fix: the CLI entry point is `jupyter` (lowercase). `Jupyter` fails on
    # case-sensitive systems, silently turning the conversion into a no-op.
    get_ipython().system('jupyter nbconvert --to python Original数据清洗为clean.ipynb')
except Exception:  # narrowed from bare `except:` so KeyboardInterrupt still propagates
    pass

# In[ ]:
