import pymysql as pq
import time
import scipy.stats
import matplotlib.pyplot as plt
import json
from itertools import groupby
from dotenv import dotenv_values
import sys

# DB credentials come from the project-level .env file one directory up.
envs = dotenv_values('../.env')

# NOTE(review): everything after the first 8 characters of argv[1]
# (presumably stripping a fixed prefix such as "--query=" — confirm against
# the caller) is concatenated straight into the SQL statement. This is only
# acceptable because the script is run locally with trusted arguments;
# never expose this entry point to untrusted input.
conn = pq.connect(host='localhost', user=envs['DB_USERNAME'],
                        passwd=envs['DB_PASSWORD'],
                        db=envs['DB_DATABASE'], charset='utf8')
try:
    # pymysql cursors are context managers; the cursor is closed on exit.
    with conn.cursor() as cur:
        cur.execute('select min_p, max_p, avg_p, average_score, mention, comment_count, publish_date '
                    + sys.argv[1][8:])
        # Fetch everything up front so the connection can be released before
        # the (CPU-bound) analysis below.
        result = cur.fetchall()
finally:
    # The original script leaked the connection; close it deterministically.
    conn.close()

# Clean the raw rows once: drop rows with any NULL, a zero average_score
# (d[3]), a zero comment_count (d[5]), or a non-positive publish_date
# timestamp. The original re-ran this whole filter — including the
# relatively expensive time.mktime conversion — once per output column
# (7x per row) and then converted the date an extra time for the value;
# here each row is validated and converted exactly once.
_valid_rows = []
for d in result:
    # Check for None first: mktime on a None date would raise.
    if None in d or d[3] == 0 or d[5] == 0:
        continue
    ts = time.mktime(d[6].timetuple())  # publish_date -> unix timestamp
    if ts <= 0:
        continue
    _valid_rows.append([float(v) for v in d[:6]] + [ts])

# data[i] is column i across all valid rows (column-major layout):
# 0 min_p, 1 max_p, 2 avg_p, 3 average_score, 4 mention, 5 comment_count,
# 6 publish_date (as a unix timestamp).
data = [[row[i] for row in _valid_rows] for i in range(7)]

# Column indices: 0 min_p, 1 max_p, 2 avg_p, 3 average_score, 4 mention, 5 comment_count, 6 publish_date
# NOTE(review): original scratch note "3、4 0\4 4\5" — meaning unclear (possibly column pairs of interest); kept for reference.

# Machine-readable column labels, in SELECT order.
name = ['min_p', 'max_p', 'avg_p', 'average_score', 'mention', 'comment_count', 'publish_date']
# Human-readable (Chinese) labels for the same columns, used by the UI.
displayName = ['最低价', '最高价', '均价', '均分', '出现次数', '评论量', '出版日期']

# Output accumulator: pairwise Spearman results plus one distribution
# entry per column.
aggregated = {
    'spearman': {},
    'distribution': [],
}

# Histogram bucket width for each column; publish_date buckets span 30 days
# of unix-timestamp seconds.
segment = [5, 5, 5, 2, 1, 100, 24 * 60 * 60 * 30]
# Whether each column's chart should use a logarithmic scale.
useLog = [True, True, True, False, True, True, False]

for col in range(7):
    series = data[col]

    # Spearman rank correlation of this column against every column —
    # the full 7x7 grid is emitted, including the self-correlation
    # diagonal and both symmetric halves, keyed "j_k".
    for other in range(7):
        corr = scipy.stats.spearmanr(series, data[other])
        aggregated['spearman'][f'{col}_{other}'] = [round(v, 3) for v in corr]

    # Histogram: bucket values by floor-division into fixed-width segments,
    # keeping only buckets whose lower edge is strictly positive.
    width = segment[col]
    grouped = groupby(sorted(series), key=lambda v: v // width)
    histogram = [(bucket * width, len(list(members)))
                 for bucket, members in grouped
                 if bucket * width > 0]

    aggregated['distribution'].append({
        "data": histogram,
        "segments": (max(series) - min(series)) // width,
        "name": name[col],
        "displayName": displayName[col],
        "useLog": useLog[col],
    })

# Transpose the column-major data back into rows and keep at most the
# first 1000 as a raw sample for the consumer.
sample_rows = list(zip(*data))
aggregated['sample'] = sample_rows[:1000]

# Emit the entire aggregate as JSON on stdout.
print(json.dumps(aggregated))
