import datetime

import matplotlib
import numpy as np
import pandas as pd
import streamlit as st
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql import SparkSession
import matplotlib.pyplot as plt
from pyspark.sql.functions import desc
import plotly.express as px
matplotlib.use('TkAgg')
# Host of the Spark standalone master; assumes the HDFS namenode runs on the
# same address — TODO confirm ports 7077 / 9000 against the cluster config.
master = "172.17.6.158"

# Create (or reuse) the SparkSession.
# NOTE: the master URL must be supplied at builder time — calling
# spark.conf.set("spark.master", ...) after getOrCreate() has no effect,
# because the underlying SparkContext is already running by then.
spark = (
    SparkSession.builder
    .appName("spark-demo")
    .master(f"spark://{master}:7077")
    .getOrCreate()
)

# Load the e-commerce dataset from HDFS; the first CSV row is the header.
data = spark.read.option("header", "true").csv(f"hdfs://{master}:9000/sparkdata")

st.title("基于spark的电商大数据统计")

st.header("信息查询")
start = st.date_input(label="开始时间", value=None, min_value=None, max_value=datetime.date.today())
end = st.date_input(label="结束时间", value=None, min_value=None, max_value=datetime.date.today())

# With value=None, newer Streamlit versions return None until the user picks
# a date, which would crash strftime below — fall back to today in that case.
if start is None:
    start = datetime.date.today()
if end is None:
    end = datetime.date.today()

# The dataset's `date` column uses "YYYY/MM/DD" strings, so format to match.
# (strftime already returns str; the extra str() wrapper was redundant.)
start = start.strftime("%Y/%m/%d")
end = end.strftime("%Y/%m/%d")

print(start)
print(end)

# Action type to query: page view, purchase, add-to-cart, favorite.
action = st.selectbox(label="请选择查询信息类别", options=('pv', 'buy', 'cart', 'fav'))

# Top-5 product categories by event count within the selected date range.
# start/end are app-formatted dates and action comes from a fixed selectbox,
# so the interpolated values are application-controlled, not free user text.
datalist = (
    data.select("user_id", "product_category")
    .where(f"date >= '{start}' and date <= '{end}' and action = '{action}'")
    .groupBy("product_category")
    .count()
    .orderBy(desc("count"))
    .limit(5)
    .collect()
)

# Build a comma-separated, SINGLE-QUOTED list for the SQL IN clause used by
# the follow-up query.  Category values are strings, so each element must be
# quoted — the previous unquoted form produced an IN predicate that could not
# match string values.
categorys = ",".join(f"'{row[0]}'" for row in datalist)

print(categorys)

# Per-day event counts for the top categories found above.
datalist1 = (
    data.select("product_category", "date")
    .where("product_category in (" + categorys + ") and action= '" + action + "' ")
    .groupBy("product_category", "date")
    .count()
    # A second .orderBy() call REPLACES the first sort rather than adding a
    # secondary key, so the original chained calls effectively sorted by
    # date only.  Sort by both keys in a single call instead.
    .orderBy(desc("product_category"), "date")
    # Plotly Express expects a pandas DataFrame; the result is already
    # aggregated and small, so collecting it to the driver is safe.
    .toPandas()
)

fig = px.line(datalist1, x='date', y='count', color='product_category')
fig.update_layout(title_text="最优5条信息情况")
st.plotly_chart(fig, use_container_width=True)

# NOTE(review): Streamlit reruns this whole script on every interaction, so
# stopping the session here forces a full Spark restart each time — consider
# caching the session with @st.cache_resource instead.
spark.stop()






