import polars as pl;
import os;
import time;
from multiprocessing import Pool;

# Wall-clock start timestamp; elapsed seconds are printed at the end of the run.
now=time.time()

def work(year):
    """Aggregate per-unit sales statistics for parquet files under ``data/``.

    Walks the ``data`` directory tree and, for every file whose name starts
    with *year*, computes three per-``dan_wei`` aggregates and inner-joins
    them on ``dan_wei``:

    - ``a``: mean over salers of each saler's total ``sale_num`` at noon
      (rows where ``sale_time`` has hour == 12)
    - ``b``: mean over goods of the number of distinct salers at noon
    - ``c``: mean over hours of the total ``sale_num`` per hour

    Parameters
    ----------
    year : str
        Filename prefix to match, e.g. ``"2020"``.

    Returns
    -------
    list
        One joined polars DataFrame per matching file (empty if none match).
    """
    results = []
    for root, _dirs, files in os.walk("data"):
        for filename in files:
            if not filename.startswith(year):
                continue
            print(filename)
            df = pl.read_parquet(os.path.join(root, filename))
            # The noon slice feeds two aggregates below; filter once instead
            # of recomputing the same filter twice.
            noon = df.filter(pl.col("sale_time").dt.hour().eq(12))
            per_saler = (
                noon.group_by([pl.col("dan_wei"), pl.col("saler")])
                .agg([pl.col("sale_num").sum()])
                .group_by([pl.col("dan_wei")])
                .agg([pl.col("sale_num").mean().alias("a")])
            )
            per_good = (
                noon.group_by([pl.col("dan_wei"), pl.col("sale_good")])
                .agg([pl.col("saler").n_unique()])
                .group_by([pl.col("dan_wei")])
                .agg([pl.col("saler").mean().alias("b")])
            )
            per_hour = (
                df.group_by([pl.col("dan_wei"), pl.col("sale_time").dt.hour()])
                .agg([pl.col("sale_num").sum()])
                .group_by([pl.col("dan_wei")])
                .agg([pl.col("sale_num").mean().alias("c")])
            )
            results.append(
                per_saler.join(per_good, on="dan_wei", how="inner")
                .join(per_hour, on="dan_wei", how="inner")
            )
    return results

if __name__ == "__main__":
    # The guard is REQUIRED with multiprocessing: under the "spawn" start
    # method (default on Windows and macOS) every worker re-imports this
    # module, and an unguarded Pool would try to spawn workers recursively.
    with Pool(3) as pool:
        # One list of per-file DataFrames per year.
        per_year = pool.map(work, ["2020", "2021", "2022"])

    # Flatten all per-file frames into a single frame, then average the
    # per-file aggregates (a, b, c) for each dan_wei across files/years.
    combined = pl.concat(
        [frame for year_frames in per_year for frame in year_frames]
    )
    result = combined.group_by([pl.col("dan_wei")]).agg([pl.all().mean()])

    print(result, time.time() - now)