
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
# from sklearn.model_selection import train_test_split
from sqlalchemy import create_engine
from matplotlib import pyplot as plt
import statsmodels as sm
import seaborn as sns 
import pymysql

# Connection settings for the local MySQL instance holding the homework data.
db_config = {
    'host': '127.0.0.1',
    'user': 'root',
    'password': 'root',
    'database': 'zuoye',
    'port': 3306,
    'charset': 'utf8'
}

# SQLAlchemy engine built from the same settings.
# NOTE(review): the queries below use the raw pymysql connection, not this
# engine — it is kept only so existing behavior is unchanged.
_dsn = (
    "mysql+pymysql://{user}:{password}@{host}:{port}/{database}"
    "?charset={charset}"
).format(**db_config)
engine = create_engine(_dsn)

# Raw DB-API connection consumed by pandas.read_sql_query further down.
conn = pymysql.connect(**db_config)

# Rows fetched per round-trip; keeps peak memory bounded for large tables.
chunk_size = 10000

# Stream one year of daily quotes for a single ticker (000001.SZ) in chunks,
# then stitch the chunks into one DataFrame.
df = pd.read_sql_query(
    """
    SELECT d.* FROM date_1 d WHERE d.trade_date BETWEEN '2023-01-01' AND '2024-01-01' AND d.ts_code = '000001.SZ'
    """,
    conn,
    chunksize=chunk_size
)
df1 = pd.concat(df, ignore_index=True)

# Daily simple return of the close price, rounded to 2 decimal places.
# NOTE(review): 2-dp rounding quantizes typical daily returns into 0.01
# steps — confirm this coarseness is intended before trusting the fit.
df1['zd_closes'] = round((df1['closes'] - df1['closes'].shift(1)) / df1['closes'].shift(1), 2)

# Drop the first row, whose return is NaN (shift(1) has no predecessor).
df1 = df1.dropna(subset=['zd_closes'])

# Columns excluded from the regressors: identifiers, the raw price fields the
# target is derived from, and near-duplicates of the target.
# Bug fix: the original list had only 'trade_data', which does not match the
# SQL column 'trade_date' (see the query above); both spellings are kept so
# the exclusion works whichever the table actually uses.
ex = ['id', 'ts_code', 'trade_data', 'trade_date', 'the_data', 'opens', 'high',
      'low', 'closes', 'pre_closes', 'changes', 'pct_chg', 'amount']
numeric_cols = df1.select_dtypes(include=['number']).columns.to_list()
predictors = [col for col in numeric_cols if col not in ex]

# Fit OLS: daily return regressed on every remaining numeric column.
formuls = 'zd_closes ~ ' + ' + '.join(predictors)
res = smf.ols(formuls, data=df1).fit()
print(res.summary())#by liyouyang#

import pandas as pd
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori, association_rules

#读取菜品数据
# Load the restaurant orders; each row's '菜品' cell holds one transaction as
# a comma-separated list of dish names.
# Bug fix: raw string — the original plain string relied on '\z' not being an
# escape sequence (SyntaxWarning on Python 3.12+, and a path segment starting
# with e.g. 't' would silently corrupt); bytes are unchanged.
df = pd.read_excel(r'data-mining-experiment\zuoye2\餐厅数据.xlsx')

# One list of dishes per transaction; drop rows whose dish cell is missing so
# the split/encoder does not fail on NaN.
transactions = df['菜品'].dropna().str.split(',').to_list()

# One-hot encode the transactions into a boolean item matrix
# (one column per distinct dish).
encoder = TransactionEncoder()
onehot = encoder.fit(transactions).transform(transactions)
df_encoded = pd.DataFrame(onehot, columns=encoder.columns_)

# Mine frequent itemsets with at least 10% support, most frequent first.
frequent_itemsets = apriori(df_encoded, min_support=0.1, use_colnames=True)
frequent_itemsets.sort_values(by='support', ascending=False, inplace=True)

# Derive association rules, then keep only the meaningful ones:
# lift > 1 (antecedent and consequent positively associated) and
# confidence above the 0.1 floor; strongest rules first.
rules = association_rules(frequent_itemsets, metric='confidence', min_threshold=0.1)
effective = rules[
    (rules['lift'] > 1) & (rules['confidence'] > 0.1)
].sort_values(by=['lift', 'confidence'], ascending=False)
print(effective[['antecedents', 'consequents', 'support', 'confidence', 'lift']])

# 可视化查看关联规则
import matplotlib.pyplot as plt
import networkx as nx
# 设置字体
# Configure matplotlib so Chinese labels render and minus signs display.
plt.rcParams['font.sans-serif'] = ['Simhei']
plt.rcParams['axes.unicode_minus'] = False

# Build a directed graph: antecedent itemset -> consequent itemset,
# with the rule's lift stored as the edge weight.
G = nx.DiGraph()
for _, rule in effective.iterrows():
    source = ','.join(list(rule['antecedents']))
    target = ','.join(list(rule['consequents']))
    G.add_edge(source, target, weight=rule['lift'])

# Draw the rule network; edge colour encodes lift via the Blues colormap.
plt.figure(figsize=(12, 8))
layout = nx.spring_layout(G)
edge_weights = [G[u][v]['weight'] for u, v in G.edges()]
nx.draw(G, layout,
        with_labels=True,
        edge_color=edge_weights,
        width=2.0,
        edge_cmap=plt.cm.Blues)
plt.show()

