import numpy as np
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

import hashlib
import os
import tarfile
import zipfile
import requests
import seaborn as sns
from matplotlib import pyplot as plt

#@save
# Registry of downloadable datasets: name -> (url, expected SHA-1 digest).
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'

# Shared MSE loss used by train() and log_rmse() below.
loss = nn.MSELoss()

DATA_HUB['kaggle_house_train'] = (  #@save
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')

DATA_HUB['kaggle_house_test'] = (  #@save
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
# Load the training and test data from the local data directory.
def load_data():
    """Read the California housing train/test CSVs and return them as
    a (train_df, test_df) pair of DataFrames."""
    train_df = pd.read_csv("./data/california-housing-prices/train.csv")
    test_df = pd.read_csv("./data/california-housing-prices/test.csv")
    return train_df, test_df
# Analyze the proportion of missing values per feature column.
def analyze_missing_data(train_data):
    """Summarize missing data in *train_data*.

    Returns a DataFrame indexed by column name with two columns:
    'Total' (count of NaNs) and 'Percent' (NaN fraction), both sorted
    in descending order of missingness. Also prints the raw counts and
    the top-20 rows of the summary, as the original did.
    """
    # Compute the null-mask aggregates once instead of four times.
    null_counts = train_data.isnull().sum()
    row_counts = train_data.isnull().count()
    total = null_counts.sort_values(ascending=False)
    print(null_counts, row_counts)
    percent = (null_counts / row_counts).sort_values(ascending=False)
    missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
    print(missing_data.head(20))
    return missing_data

def convert_bedroom(bedroom):
    """Normalize a raw 'Bedrooms' cell to a numeric count.

    - Comma-separated room-list strings (e.g. "Walk-in Closet, Master
      Bedroom") are converted to the number of entries they contain.
    - A count of 0 is treated as a studio and mapped to 1.
    - Any other value (positive counts, NaN) is returned unchanged.
    """
    # BUG FIX: the original relied on `bedroom == 0` raising for string
    # input, but in Python 3 `str == 0` simply returns False, so the
    # except branch was dead and strings passed through unconverted.
    # Branch on the type explicitly instead.
    if isinstance(bedroom, str):
        return len(bedroom.split(","))
    if bedroom == 0:
        return 1
    return bedroom

# Analyze how the numeric features correlate with the sale price.
def get_num_feature(train_data):
    """Plot correlation heatmaps for the numeric columns of *train_data*.

    First shows a full correlation heatmap over all numeric columns,
    then a zoomed, annotated heatmap of the 10 columns most correlated
    with 'Sold Price'. Requires *train_data* to contain a numeric
    'Sold Price' column; blocks on each plt.show() call. Returns None.
    """
    numeric_cols = train_data.select_dtypes(include=np.number).columns
    print(numeric_cols)
    corrmat = train_data[numeric_cols].corr()
    f, ax = plt.subplots(figsize=(12, 9))
    sns.heatmap(corrmat, vmax=.8, square=True)
    plt.show()
    # Keep only the k columns most correlated with the target.
    k = 10
    cols = corrmat.nlargest(k, 'Sold Price')['Sold Price'].index
    cm = np.corrcoef(train_data[cols].values.T)
    sns.set(font_scale=1.25)
    hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
    plt.xticks(rotation = 90)
    plt.show()


# Analyze how non-numeric features relate to price.
# NOTE(review): this comment appears orphaned — no such analysis follows it.

def get_net():
    """Build a one-layer linear-regression network.

    NOTE(review): relies on a module-level `in_features`, which is only
    assigned in the commented-out __main__ code below — calling this
    as the file stands raises NameError. Confirm `in_features` is set
    before use.
    """
    net = nn.Sequential(nn.Linear(in_features,1))
    return net

def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    """Fit *net* with Adam on MSE loss; return per-epoch log-RMSE curves.

    Returns (train_ls, test_ls); test_ls stays empty when test_labels
    is None.
    """
    history_train, history_test = [], []
    data_iter = d2l.load_array((train_features, train_labels), batch_size)
    # Adam is fairly insensitive to the initial learning rate here.
    trainer = torch.optim.Adam(net.parameters(), lr=learning_rate,
                               weight_decay=weight_decay)
    for _ in range(num_epochs):
        for X, y in data_iter:
            trainer.zero_grad()
            loss(net(X), y).backward()
            trainer.step()
        # Record whole-set log-RMSE once per epoch.
        history_train.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            history_test.append(log_rmse(net, test_features, test_labels))
    return history_train, history_test

def log_rmse(net, features, labels):
    """Return RMSE between log-predictions and log-labels as a float."""
    # Clamp predictions to >= 1 so taking the log is well-defined and
    # numerically stable.
    preds = net(features).clamp(min=1.0)
    return torch.sqrt(loss(preds.log(), labels.log())).item()
def get_k_fold_data(k, i, X, y):
    """Split (X, y) into k folds; fold *i* becomes the validation set.

    Returns (X_train, y_train, X_valid, y_valid). Any trailing rows
    beyond k * (X.shape[0] // k) are dropped.
    """
    assert k > 1
    fold_size = X.shape[0] // k
    X_train = y_train = None
    for j in range(k):
        rows = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[rows, :], y[rows]
        if j == i:
            # This fold is held out for validation.
            X_valid, y_valid = X_part, y_part
            continue
        if X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = torch.cat([X_train, X_part], 0)
            y_train = torch.cat([y_train, y_part], 0)
    return X_train, y_train, X_valid, y_valid


def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay,
           batch_size):
    """Run k-fold cross-validation; return mean (train, valid) log-RMSE."""
    sum_train, sum_valid = 0, 0
    for fold in range(k):
        fold_data = get_k_fold_data(k, fold, X_train, y_train)
        net = get_net()
        train_ls, valid_ls = train(net, *fold_data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        # Only the final-epoch value of each fold enters the average.
        sum_train += train_ls[-1]
        sum_valid += valid_ls[-1]
        # Plot learning curves for the first fold only.
        if fold == 0:
            d2l.plot(list(range(1, num_epochs + 1)), [train_ls, valid_ls],
                     xlabel='epoch', ylabel='rmse', xlim=[1, num_epochs],
                     legend=['train', 'valid'], yscale='log')
        print(f'折{fold + 1}，训练log rmse{float(train_ls[-1]):f}, '
              f'验证log rmse{float(valid_ls[-1]):f}')
    return sum_train / k, sum_valid / k

def train_and_pred(train_features, test_features, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    """Train on the full training set and write Kaggle predictions.

    Trains a fresh network, plots the training curve, attaches the
    predictions to *test_data* as 'SalePrice', and saves an
    Id + SalePrice CSV named 'submission.csv'.
    """
    net = get_net()
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch',
             ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
    print(f'训练log rmse：{float(train_ls[-1]):f}')
    # Predict on the test set and flatten to a 1-D vector.
    preds = net(test_features).detach().numpy()
    test_data['SalePrice'] = pd.Series(preds.reshape(-1))
    # Kaggle's submission format is an Id + SalePrice CSV.
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)

def download_extract(name, folder=None):  #@save
    """Download a zip/tar archive from DATA_HUB and extract it.

    Extracts next to the downloaded file and returns the path of
    *folder* under that directory if given, otherwise the archive name
    with its extension stripped. Raises AssertionError for other
    extensions.
    """
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        opener = zipfile.ZipFile
    elif ext in ('.tar', '.gz'):
        opener = tarfile.open
    else:
        assert False, '只有zip/tar文件可以被解压缩'
    # BUG FIX: the original never closed the archive handle; use a
    # context manager so it is released deterministically.
    with opener(fname, 'r') as fp:
        fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir

def download_all():  #@save
    """Download every dataset registered in DATA_HUB."""
    for dataset_name in DATA_HUB:
        download(dataset_name)

def download(name, cache_dir=os.path.join('..', 'data')):  #@save
    """Download a DATA_HUB file if needed; return the local file path.

    A cached copy is reused only when its SHA-1 digest matches the
    registered hash, so corrupted or partial downloads are re-fetched.
    """
    assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            # Hash in 1 MiB chunks to bound memory use.
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # cache hit
    print(f'正在从{url}下载{fname}...')
    r = requests.get(url, stream=True, verify=True)
    # BUG FIX: fail loudly on HTTP errors instead of caching an error
    # page under the dataset's filename.
    r.raise_for_status()
    with open(fname, 'wb') as f:
        # BUG FIX: the original read r.content, pulling the whole file
        # into memory and defeating stream=True; write in chunks.
        for chunk in r.iter_content(chunk_size=1048576):
            f.write(chunk)
    return fname

if __name__ == '__main__':
    # --- Original d2l Kaggle house-price pipeline, kept for reference ---
    # train_data = pd.read_csv(download('kaggle_house_train'))
    # test_data = pd.read_csv(download('kaggle_house_test'))
    # n_train = train_data.shape[0]
    # train_data = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
    # train_features = torch.tensor(train_data[:n_train].values, dtype=torch.float32)
    # test_features = torch.tensor(train_data[n_train:].values, dtype=torch.float32)
    # in_features = train_features.shape[1]
    # train_labels = torch.tensor(
    #     train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)

    # k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
    # train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,
    #                         weight_decay, batch_size)
    # print(f'{k}-折验证: 平均训练log rmse: {float(train_l):f}, '
    #     f'平均验证log rmse: {float(valid_l):f}')

    # train_and_pred(train_features, test_features, train_labels, test_data,
    #             num_epochs, lr, weight_decay, batch_size)
    train_data, test_data = load_data()
    missing_data = analyze_missing_data(train_data=train_data)
    print(train_data.shape)
    # Drop columns with more than 40% missing values.
    train_data.drop(missing_data[missing_data["Percent"] > 0.4].index, axis=1, inplace=True)
    print(train_data.shape)
    # BUG FIX: apply() returns a new Series — the original discarded it,
    # leaving 'Bedrooms' untouched. Assign the result back.
    train_data["Bedrooms"] = train_data["Bedrooms"].apply(convert_bedroom)
    # Fill remaining numeric NaNs with the mode / a related column.
    train_data["Bedrooms"] = train_data["Bedrooms"].fillna(2.0)
    train_data['Bathrooms'] = train_data['Bathrooms'].fillna(train_data['Full bathrooms'])
    train_data['Bathrooms'] = train_data['Bathrooms'].fillna(2.0)
    train_data['Full bathrooms'] = train_data['Full bathrooms'].fillna(train_data['Bathrooms'])
    # BUG FIX: fillna with the Series returned by mode() aligns on the
    # row index (0, 1, ...) and fills almost nothing; use the scalar
    # mode()[0], as the 'Elementary School Score' line below already does.
    train_data['Full bathrooms'] = train_data['Full bathrooms'].fillna(train_data['Full bathrooms'].mode()[0])
    train_data['Elementary School Score'] = train_data['Elementary School Score'].fillna(train_data['Elementary School Score'].mode().values[0])
    train_data['High School Score'] = train_data['High School Score'].fillna(train_data['High School Score'].mode()[0])
    # Approximate missing tax assessments from the listed price.
    train_data['Tax assessed value'] = train_data['Tax assessed value'].fillna(train_data['Listed Price'] * 0.66)

    get_num_feature(train_data=train_data)

