from pyspark.sql import SparkSession
import os
from demos import SchemaLoader
from metrics import  BestFilmsByOverallRating,GenresByAverageRating,MostRatedFilms
from util import  CommonUtil

# Point the driver at the cluster's JDK and Hadoop client configuration.
# These must be in the environment before SparkSession.builder.getOrCreate()
# runs below, so PySpark can locate Java and the YARN/HDFS config files.
os.environ['JAVA_HOME'] = "/bigdata/server/jdk"
os.environ['HADOOP_CONF_DIR'] = "/bigdata/server/hadoop/etc/hadoop"

# HDFS locations of the input CSV datasets (movies and their user ratings).
MOVIES_CSV_FILE_PATH = "hdfs://master:8020/tmp/data/movies.csv"
RATINGS_CSV_FILE_PATH = "hdfs://master:8020/tmp/data/ratings.csv"

if __name__ == '__main__':
    # Build the Spark session, submitting this job to the YARN cluster.
    spark = SparkSession.builder \
        .appName("movie_stat") \
        .master("yarn") \
        .getOrCreate()

    # Ensure cluster resources are always released, even if a stage fails —
    # the original script never called spark.stop(), leaving the YARN
    # application running after the driver exited abnormally.
    try:
        # Helper objects: CSV schemas and shared read/write utilities.
        schema_loader = SchemaLoader.SchemaLoader()
        common_util = CommonUtil.CommonUtil()

        # Load the CSV files into DataFrames using their declared schemas.
        movie_df = common_util.readCsvIntoDataSet(spark, MOVIES_CSV_FILE_PATH, schema_loader.movieSchema)
        rating_df = common_util.readCsvIntoDataSet(spark, RATINGS_CSV_FILE_PATH, schema_loader.ratingSchema)

        # Requirement 1: top-10 movies by average rating among movies with
        # more than 5000 ratings (movie title and its average rating).
        best_films = BestFilmsByOverallRating.BestFilmsByOverallRating()
        result_df1 = best_films.run(movie_df, rating_df, spark)
        common_util.writeIntoMysql(result_df1, "ten_movies_averagerating")

        # Requirement 2: average rating for each movie genre.
        genres_by_avg = GenresByAverageRating.GenresByAverageRating()
        result_df2 = genres_by_avg.run(movie_df, rating_df, spark)
        common_util.writeIntoMysql(result_df2, "genres_average_rating")

        # Requirement 3: top-10 most frequently rated movies.
        most_rated = MostRatedFilms.MostRatedFilms()
        result_df3 = most_rated.run(movie_df, rating_df, spark)
        common_util.writeIntoMysql(result_df3, "ten_most_rated_films")
    finally:
        # Release the YARN application and all executor resources.
        spark.stop()


