import pandas as pd
import numpy as np
import json
import os

def scan_files(directory, prefix=None, postfix=None):
    """Recursively collect file paths under *directory*.

    Selection rules (postfix takes precedence when both are given):
      - truthy ``postfix``: keep files whose name ends with it,
      - else truthy ``prefix``: keep files whose name starts with it,
      - else: keep every file.

    Returns a list of full paths (root-joined).
    """
    matched = []
    for dirpath, _subdirs, filenames in os.walk(directory):
        for filename in filenames:
            if postfix:
                keep = filename.endswith(postfix)
            elif prefix:
                keep = filename.startswith(prefix)
            else:
                keep = True
            if keep:
                matched.append(os.path.join(dirpath, filename))
    return matched

def if_major(path):
    """Classify one school CSV by how its "Major" column is filled in.

    Returns:
        1 -- every row names a concrete major (no "all" sentinel rows),
             i.e. every major has its own score line.
        2 -- a mix: some rows say "all", some name a specific major.
        3 -- no per-major breakdown (only "all" rows, an empty column,
             or no "Major" column at all).
    """
    # Data files are saved in either GBK or UTF-8; try GBK first and fall
    # back on a decode failure.  NOTE: the original used
    # ``error_bad_lines=False``, which was removed in pandas 2.0 — the
    # modern spelling is ``on_bad_lines='skip'`` (pandas >= 1.3).
    try:
        df = pd.read_csv(path, encoding='GBK', on_bad_lines='skip')
    except (UnicodeDecodeError, UnicodeError):
        df = pd.read_csv(path, encoding='utf-8', on_bad_lines='skip')

    if "Major" not in df.columns:
        return 3  # no major column at all -> no per-major breakdown

    majors = list(df["Major"])
    if "all" not in majors:
        return 1  # every major has its own score line
    if any(m != "all" for m in majors):
        return 2  # partial per-major breakdown
    return 3      # only "all" rows -> no per-major breakdown

def school_data_num(all_files):
    """Count data rows per admission year (2019..2016) across all CSVs.

    Args:
        all_files: iterable of CSV file paths.

    Returns:
        Tuple ``(n2019, n2018, n2017, n2016)`` of row counts whose year
        column equals each year.
    """
    counts = {2019: 0, 2018: 0, 2017: 0, 2016: 0}
    for path in all_files:
        # Try GBK first, retry as UTF-8 only on a decode failure
        # (the original bare ``except:`` hid every other error).
        try:
            df = pd.read_csv(path, encoding='GBK')
        except (UnicodeDecodeError, UnicodeError):
            df = pd.read_csv(path, encoding='utf-8')
        # The original read ``row[2]`` from itertuples: position 0 is the
        # index, so this is the SECOND data column — presumably "Year";
        # TODO(review): confirm against the CSV schema.
        for year in df.iloc[:, 1]:
            if year in counts:
                counts[year] += 1
    return counts[2019], counts[2018], counts[2017], counts[2016]

def json_data_num(all_files):
    """Sum the number of leaf entries across all province-ranking JSONs.

    Each JSON is expected to look like ``{name: {key: {k: [...], ...}}}``:
    the innermost dict maps keys to lists, and every list element counts
    as one data record.

    Args:
        all_files: iterable of JSON file paths.

    Returns:
        Total number of leaf list elements across all files.
    """
    total = 0
    for path in all_files:
        # The original wrapped ``open()`` in try/except, but ``open`` does
        # not decode anything — decode errors surface when json.load reads
        # the stream.  Wrap the read instead, and use ``with`` so the
        # handle is closed (the original leaked it by rebinding ``f``).
        try:
            with open(path, encoding='utf-8') as fh:
                payload = json.load(fh)
        except UnicodeDecodeError:
            with open(path, encoding='GBK') as fh:
                payload = json.load(fh)
        # Drill two dict levels down, then count elements of every leaf list.
        inner_lists = list(list(payload.values())[0].values())[0].values()
        total += sum(len(leaf) for leaf in inner_lists)
    return total

def if_major_num(all_files):
    """Tally the if_major() classification over a list of school CSVs.

    Args:
        all_files: iterable of CSV file paths.

    Returns:
        Tuple ``(full, partial, none)``:
        ``full``    -- files where every major has its own score (code 1),
        ``partial`` -- files where only some majors do (code 2),
        ``none``    -- files with no per-major breakdown (code 3).
    """
    full = partial = none = 0
    for path in all_files:
        # Call if_major once per file — the original called it up to
        # twice, re-reading and re-parsing the same CSV each time.
        kind = if_major(path)
        if kind == 1:
            full += 1
        elif kind == 2:
            partial += 1
        elif kind == 3:
            none += 1
    return full, partial, none

def merge_csv(all_files):
    """Concatenate all school CSVs into one DataFrame and drop junk columns.

    Args:
        all_files: iterable of CSV file paths.

    Returns:
        A single DataFrame with the canonical columns (College, Year,
        Province, Category, Major, Score, Contributor); scraper artifacts
        ('Unnamed: *', 'Campus') are removed when present.
    """
    # Seed with an empty frame so the canonical columns always exist and
    # lead the column order, exactly as the original did.
    frames = [pd.DataFrame({'College': [], 'Year': [], 'Province': [],
                            'Category': [], 'Major': [], 'Score': [],
                            'Contributor': []})]
    for path in all_files:
        # GBK first, UTF-8 on decode failure (matches the other readers).
        try:
            df = pd.read_csv(path, encoding='GBK')
        except (UnicodeDecodeError, UnicodeError):
            df = pd.read_csv(path, encoding='utf-8')
        frames.append(df)

    # Concatenate once at the end: calling pd.concat inside the loop is
    # quadratic (each call copies everything accumulated so far).
    merged = pd.concat(frames, sort=False)

    # errors='ignore' so a missing artifact column no longer raises
    # KeyError (the original crashed unless every listed column appeared).
    return merged.drop(columns=['Unnamed: 0', 'Unnamed: 3', 'Campus',
                                'Unnamed: 6', 'Unnamed: 7', 'Unnamed: 9'],
                       errors='ignore')

    



#def main():
    

#if __name__ == '__main__':
#    main()

# --- Script entry: summarize and merge everything under ./data ----------
# Gather all school CSVs and province-ranking JSONs (postfix match only).
csv_all_files = scan_files('data',None,'csv')
json_all_files = scan_files('data',None,'json')

# One CSV file per school: report how many schools were collected and how
# finely each one breaks scores down by major (codes 1/2/3 in if_major).
print("已采集",len(csv_all_files),"所高校数据")
t,s,r=if_major_num(csv_all_files)
print("其中",t,"所高校每个专业都有对应分数线")
print("    ",s,"所高校部分专业有对应分数线")
print("    ",r,"所高校没有细分专业")
# Per-year row counts across all school CSVs (2019 down to 2016).
school_num2019,school_num2018,school_num2017,school_num2016=school_data_num(csv_all_files)
print("总计2019年单条数据量：",school_num2019)
print("总计2018年单条数据量：",school_num2018)
print("总计2017年单条数据量：",school_num2017)
print("总计2016年单条数据量：",school_num2016)

# Province score-ranking JSONs: total leaf record count.
# NOTE(review): the province count is hard-coded to 35 — presumably it
# should be len(json_all_files); confirm before changing.
province_num=json_data_num(json_all_files)
print("已采集",35,"个省份高考分数排名")
print("总计单条数据量：",province_num)


# Merge every school CSV into one file; utf-8_sig adds a BOM so Excel on
# Windows opens the Chinese text correctly.
t=merge_csv(csv_all_files)
t.to_csv('高校录取分数线整合.csv',encoding='utf-8_sig')
print("已保存全部数据至'高校录取分数线整合.csv'")














