import os

import pandas as pd
from werkzeug.utils import secure_filename  # assumes Flask/werkzeug-style uploads

basePath = "./data/output/data/"   # per-metric result CSVs
uploadPath = "./data/latents/"     # destination for uploaded latent files

# def toJson(data):
#     return {
#         "success": True,
#         "data": data
#     }
def getAllData():
    # Merge every result CSV under basePath into one dataframe.
    files = [f for f in os.listdir(basePath) if f.endswith(".csv")]
    frames = [pd.read_csv(os.path.join(basePath, f)) for f in files]
    merged_df = pd.concat(frames, ignore_index=True)

    # Average the numeric metric columns per (method, dataset) pair.
    grouped_df = (
        merged_df.groupby(['methods', 'datasets'])
        .mean(numeric_only=True)
        .reset_index()
    )
    grouped_df = grouped_df.fillna(0)

    # Round float columns to four decimal places for display.
    for col in grouped_df.select_dtypes(include=['float']).columns:
        grouped_df[col] = grouped_df[col].round(4)

    return grouped_df.to_dict(orient='records')

def getList(datatype):
    # Map each metric category to its source CSV.
    fileMap = {
        "Integration Accuracy": "integration_accuracy.csv",
        "Batch Correction": "batch.csv",
        "Bio Conservation": "biomarker.csv",
    }
    if datatype not in fileMap:
        raise ValueError(f"Unknown datatype: {datatype!r}")

    df = pd.read_csv(os.path.join(basePath, fileMap[datatype]))
    df["object_type"] = datatype
    return df.to_dict(orient='records')

def getListByName(file, name):
    # Return only the rows of the given CSV whose 'methods' column matches name.
    path = os.path.join(basePath, file)
    df = pd.read_csv(path)
    filtered_records = df[df['methods'] == name]
    return filtered_records.to_dict(orient='records')

def uploadFile(uploadFiles):
    os.makedirs(uploadPath, exist_ok=True)
    for file in uploadFiles:
        # secure_filename sanitizes user-supplied names against path traversal
        # (assumes the items are werkzeug/Flask FileStorage objects).
        save_path = os.path.join(uploadPath, secure_filename(file.filename))
        file.save(save_path)
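
# Hypothetical smoke test of the helpers above: "scVI" is an assumed method
# name, not necessarily present in the data; "batch.csv" matches the mapping
# in getList.
if __name__ == "__main__":
    print(getAllData()[:2])
    print(getList("Batch Correction")[:2])
    print(getListByName("batch.csv", "scVI")[:2])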