from collections import Counter

import jieba
import pandas as pd

# Module-level dataset: Shijiazhuang property listings consumed by the helpers below.
df = pd.read_csv('./data_statistic/SJZ_Property.csv')


def value_count_statistics(df, col_name, loc=None):
    """Count occurrences of each value in *col_name*, optionally per district.

    Args:
        df: DataFrame containing *col_name* and, when *loc* is given, a
            ``property_loc`` column to filter on.
        col_name: Name of the column whose value frequencies are counted.
        loc: Optional ``property_loc`` value; only matching rows are counted.

    Returns:
        list[dict]: ``[{'name': value, 'value': count}, ...]`` in
        ``value_counts`` order (count descending).
    """
    # Test `is not None` rather than truthiness so a falsy-but-valid filter
    # value still filters; this also matches value_sort_statistics below.
    if loc is not None:
        result = df[df.property_loc == loc][col_name].value_counts()
    else:
        result = df[col_name].value_counts()
    return [{'name': k, 'value': v} for k, v in result.items()]


def value_sort_statistics(dataframe, col_name, loc=None, ascend=False, top=50):
    """Return the top rows of *dataframe* ranked by *col_name*.

    Args:
        dataframe: DataFrame containing *col_name* and, when *loc* is given,
            a ``property_loc`` column to filter on.
        col_name: Column to sort by.
        loc: Optional ``property_loc`` value; only matching rows are ranked.
        ascend: Sort ascending when True; descending (default) otherwise.
        top: Maximum number of rows to return.

    Returns:
        list[dict]: One record dict per row (``DataFrame.to_dict('records')``).
    """
    subset = dataframe if loc is None else dataframe[dataframe.property_loc == loc]
    ranked = subset.sort_values(by=col_name, ascending=ascend).head(top)
    return ranked.to_dict('records')


def area_price_order(dataframe):
    """Rank districts by their mean average property price, highest first.

    Args:
        dataframe: DataFrame with ``property_loc`` and
            ``property_average_price`` columns.

    Returns:
        list[dict]: ``[{'area': district, 'value': mean_price}, ...]``
        sorted by mean price descending.
    """
    # Scalar groupby key (instead of a length-1 list) yields the same
    # Series; the leftover debug print of the intermediate result is removed.
    means = (
        dataframe.groupby('property_loc')['property_average_price']
        .mean()
        .sort_values(ascending=False)
    )
    return [{'area': area, 'value': price} for area, price in means.items()]


def title_statistic(path='./text/title.txt', top=100):
    """Compute the most frequent words in the listing-title corpus.

    The file at *path* is read as UTF-8, segmented with ``jieba``, and
    single-character tokens are dropped before counting.

    Args:
        path: Text file to analyse (defaults to the project title corpus).
        top: Maximum number of word entries to return.

    Returns:
        list[dict]: ``[{'word': w, 'value': count}, ...]`` ordered by
        count descending, at most *top* entries.
    """
    with open(path, mode='r', encoding='utf-8') as f:
        words = jieba.lcut(f.read())
    # Counter.most_common replaces the hand-rolled dict + sort; ties keep
    # first-seen order, matching the original stable sort.
    counts = Counter(word for word in words if len(word) > 1)
    return [{'word': word, 'value': count} for word, count in counts.most_common(top)]

# Example usage:
# value_count_statistics(df, 'property_name', '开发区')
# value_count_statistics(df, 'property_info')
# value_sort_statistics(df, 'property_price')
# area_price_order(df)
