#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/5/27 9:32
# @Author  : mxc
# @File    : yuce.py
# @Software: PyCharm
import csv
from sklearn.linear_model import Ridge, Lasso, ElasticNetCV
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.svm import SVR
from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from scipy.stats import pearsonr
import numpy as np
import pandas as pd

'''
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
import pandas as pd
from sklearn.datasets import load_boston
from keras.layers import Dense,Dropout,Activation,Input
from keras.models import Sequential,Model
from sklearn.model_selection import train_test_split
from numpy import *
from keras import metrics
def load_data():
    df_train=pd.read_csv("train.csv")
    df_test=pd.read_csv("test.csv")
    return df_train,df_test

def make_model(InputSize):
    model=Sequential()
    model.add(Dense(units=90,activation='relu',input_shape=(InputSize,)))
    model.add(Dropout(0.1))
    model.add(Dense(units=50,activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(units=1,activation=None))
    model.compile(loss='mean_squared_error',optimizer='adam',metrics=[metrics.mae])
    print(model.summary())
    return model

if __name__ == '__main__':
    df_train,df_test=load_data()
    X_train,X_test,y_train,y_test=train_test_split(df_train.drop('sell_price',axis=1).values,df_train['sell_price'].values,test_size=0.1,random_state=0)
    model=make_model(17)
    model.fit(X_train,y_train,batch_size=32,epochs=100,verbose=1,validation_data=(X_test,y_test),shuffle=True)
    pred=model.predict(df_test.values)
    submissionFile=pd.DataFrame({'price':pred.reshape(1,-1)[0]})
    submissionFile.to_csv("submission.csv",index=False)

import re
with open("yuce.csv", "w",encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    data = ["房屋面积", "价格", "押一付一", "有阳台", "南北通透", "配套齐全","独立阳台", "首次出租", "精装修", "独卫", "邻地铁"]
    # 先写入columns_name
    writer.writerow(data)
    # 写入多行用writerows
    # writer.writerows([[0, 1, 3], [1, 2, 3], [2, 3, 4]])

guanjian=set()
guanjian1=[]

store_list=[]
with open("房屋租赁数据ceshi.csv","r",encoding='utf-8')as f:
    for i in f.readlines():
        l=i.strip().split('"')
        # 押一付一，独立阳台,南北通透,配套齐全,首次出租,精装修,独卫,邻地铁
        if '押一付一' in l[1][1:-1]:
            a=1
        else:
            a=0
        if '阳台' in l[1][1:-1]:
            b = 1
        else:
            b=0
        if '南北通透' in l[1][1:-1]:
            c=1
        else:
            c=0
        if '配套齐全' in l[1][1:-1]:
            d=1
        else:
            d=0
        if '首次出租' in l[1][1:-1]:
            e=1
        else:
            e=0
        if '精装修' in l[1][1:-1]:
            f=1
        else:
            f=0
        if '独卫' in l[1][1:-1]:
            g=1
        else:
            g=0
        if '邻地铁' in l[1][1:-1]:
            h=1
        else:
            h=0
        store_list.append([i.split(",")[3].strip("㎡"), i.split(",")[-2],a,b,c,d,e,f,g,h ])
with open("yuce_test.csv", "a+",encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile)
    #data = ["房屋面积", "价格", "押一付一", "有阳台", "南北通透", "配套齐全","独立阳台", "首次出租", "精装修", "独卫", "邻地铁"]
    # 先写入columns_name
    #writer.writerow(data)
    # 写入多行用writerows
    writer.writerows(store_list)

with open('yuce_test.csv', 'r', encoding='utf-8') as fr, open('new.csv', 'w', encoding='utf-8') as fd:
    for text in fr.readlines():
        if text.split():
            fd.write(text)
    print('输出成功....')
'''



def lasso(x_train, y_train, x_test):
    """Fit a LASSO regressor via 10-fold grid search and predict on x_test.

    The alpha grid is deliberately narrowed to a single value (5); the
    fuller sweep is kept in the trailing comment for reference.

    Returns the array of predictions (also echoed to stdout for debugging).
    """
    search_space = {'alpha': [5]}  # fuller sweep: 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1
    searcher = GridSearchCV(Lasso(), search_space, cv=10)
    searcher.fit(x_train, y_train)
    predictions = searcher.predict(x_test)
    print(predictions)
    return predictions


def ridge(x_train, y_train, x_test):
    """Fit a Ridge regressor via a 10-fold grid search over alpha, predict x_test.

    Side effects: appends the best score / best params of each run to
    'canshu_ridge.txt' and echoes them to stdout.
    """
    alphas = [0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5]
    model = GridSearchCV(Ridge(), {'alpha': alphas}, cv=10)  # cv=10: 10-fold cross-validation
    model.fit(x_train, y_train)
    log_line = ("ridge网格搜索最优得分：" + str(model.best_score_) + '\n'
                + "ridge网格搜索最优参数组合：" + str(model.best_params_) + '\n')
    with open("canshu_ridge.txt", 'a+', encoding='utf-8') as f:
        f.write(log_line)
    print("RIDGE网格搜索最优得分：", model.best_score_)
    print("RIDGE网格搜索最优参数组合：\n", model.best_params_)
    return model.predict(x_test)


def svr(x_train, y_train, x_test):
    """Grid-search an SVR over gamma and C (10-fold CV, r2 scoring), predict x_test."""
    grid = {'gamma': range(1, 10), 'C': range(1, 100, 10)}
    model = GridSearchCV(SVR(), grid, cv=10, scoring='r2')
    model.fit(x_train, y_train)
    print("网格搜索最优得分：", model.best_score_)
    print("网格搜索最优参数组合：\n", model.best_params_)
    return model.predict(x_test)


'''
def random_forest(x_train, y_train, x_test):
    param_grid = {'n_estimators':range(100,1001,100),'max_features':['auto','sqrt','log2']}  # 50,500,50
    model = GridSearchCV(RandomForestRegressor(n_jobs = 24),param_grid,cv = 10,scoring = 'r2').fit(x_train,y_train)
    y_pred = model.predict(x_test)
    return y_pred


def AdaBoost(x_train, y_train, x_test):
    param_0 = {'min_samples_leaf':range(1,5)}
    base_model = GridSearchCV(DecisionTreeRegressor(),param_0,cv=10,scoring='r2')
    base_model.fit(x_train,y_train)
    base_samples_leaf = base_model.best_params_['min_samples_leaf']
    param_grid = {'n_estimators':range(10,101,10),'loss':['square','linear','exponential']}
    model = GridSearchCV(AdaBoostRegressor(base_estimator = DecisionTreeRegressor(min_samples_leaf=base_samples_leaf)),param_grid,cv = 10,scoring = 'r2').fit(x_train,y_train)
    y_pred = model.predict(x_test)
    return y_pred

def elastic(x_train, y_train, x_test):
    model = ElasticNetCV(cv = 10, max_iter = 100000).fit(x_train,y_train)
    y_pred = model.predict(x_test)
    return y_pred

def knn(x_train, y_train, x_test):
    param_grid = {'n_neighbors':range(2,20,2)}
    model = GridSearchCV(KNeighborsRegressor(n_jobs = 24),param_grid,cv = 10,scoring = 'r2').fit(x_train,y_train)
    y_pred = model.predict(x_test)
    return y_pred
 '''


def Kflodtrain(X, Y, x_1, fold, model_type):
    """Train one model per K-fold training split of (X, Y); predict x_1 each time.

    NOTE(review): the per-fold held-out split is computed but never used —
    every fold predicts on the external matrix x_1 instead, so the
    concatenated result holds fold * len(x_1) predictions. y_test_total is
    returned empty (its accumulation is commented out in the original);
    confirm both points are intentional before relying on the return values.

    NOTE(review): shuffle=True without random_state makes splits
    non-deterministic across runs.

    Returns (y_test_total, y_pred_total): an empty list and the concatenated
    fold predictions.
    """
    splitter = KFold(n_splits=fold, shuffle=True)
    y_test_total = []
    fold_predictions = []
    for train_idx, _unused_test_idx in splitter.split(X, Y):
        x_train, y_train = X[train_idx], Y[train_idx]
        x_test = x_1  # always the external prediction set, not the held-out fold
        if model_type == "LASSO":
            fold_predictions.append(lasso(x_train, y_train, x_test))
            print(len(x_test))
        elif model_type == 'SVR':
            fold_predictions.append(svr(x_train, y_train, x_test))
        else:  # anything else falls through to ridge
            fold_predictions.append(ridge(x_train, y_train, x_test))
    y_pred_total = np.concatenate(fold_predictions, axis=0)
    return y_test_total, y_pred_total


def main():
    """Load training data (yuce.csv) and the prediction set (yuce_test.csv),
    then run the K-fold LASSO pipeline ten times.

    NOTE(review): y is taken from column index 9, although the yuce.csv
    header written elsewhere in this file places 价格 (price) at index 1 —
    confirm the actual column layout of yuce.csv.
    """
    train_table = np.array(pd.read_csv("yuce.csv", header=0).values)
    predict_table = np.array(pd.read_csv("yuce_test.csv").values)
    x = train_table[:, :9]   # first nine columns as features
    y = train_table[:, 9]    # tenth column as target — see NOTE above
    x_1 = predict_table[:, :9]
    fold = 10
    for model_type in ['LASSO']:  # other candidates: 'SVM', 'ridge'
        print(model_type)
        # Retained from the commented-out PCC evaluation; currently unused.
        PCC = []
        y_test, y_pred = [], []
        for i in range(10):
            print("i=", i, " start")
            # Results are currently discarded; the PCC bookkeeping that
            # consumed them is commented out in the original.
            y_test_total, y_pred_total = Kflodtrain(x, y, x_1, fold, model_type)


# Script entry point: run the full load/train/predict pipeline.
if __name__ == "__main__":
    main()