from pyspark import SparkConf, SparkContext
from operator import add
import pandas as pd
import numpy as np
import csv
import re

# Destination cities whose travelogue CSVs are read and aggregated below.
city_list = ['北京', '重庆', '上海', '天津', '成都',
             '三亚', '杭州', '武汉', '广州', '西安',
             '贵阳', '苏州', '昆明', '南京', '桂林',
             '张家界', '丽江', '哈尔滨', '拉萨', '长沙']  # city names
"""
for i in range(len(city_list)):
    data = pd.DataFrame()
    for m in range(1, 5):
        dt = pd.read_csv('./游记数据/' + city_list[i] + '/第' + str(m) + '季度.csv',encoding='gb18030')
        data = pd.concat([data,dt])
        data.reset_index(drop=True, inplace=True)
        data.to_csv('./游记数据/' + city_list[i] + '/' + city_list[i] + '.csv', index=False, encoding='gbk')
"""

if __name__ == '__main__':
    # Initialize the execution environment and build the SparkContext.
    conf = SparkConf().setMaster("local[*]").setAppName("BrowserCount")
    sc = SparkContext(conf=conf)

    for city in city_list:
        # Read the merged per-city CSV produced by the preprocessing step.
        file_rdd = sc.textFile('./游记数据/' + city + '/' + city + '.csv')
        split_rdd = file_rdd.map(lambda x: x.split(","))

        # (column 2, column 5) -> (key, view count). Cast the count to int so
        # reduceByKey sums numerically; the split fields are strings, and
        # `a + b` on strings would concatenate instead of adding.
        view_rdd = split_rdd.map(lambda x: (x[2], int(x[5])))

        # Sum per key, then sort by the summed count — index 1 of the
        # (key, count) pair (indexing x[2] on a 2-tuple raises IndexError).
        result = view_rdd.reduceByKey(add).sortBy(lambda kv: kv[1])

        # (year-month, 1) pairs. Extract the matched text with .group():
        # re.Match objects are unique per call and would never compare equal,
        # so reduceByKey could not aggregate them. Rows with no match map to
        # a single None key instead of crashing.
        month_rdd = split_rdd.map(
            lambda x: ((m.group()
                        if (m := re.search(r'20\d\d-\d{1,}', str(x[1])))
                        else None), 1))

        # Count travelogues per month, sort chronologically by the month key
        # (the 'YYYY-M' string), and write each city to its own directory:
        # saveAsTextFile fails if the target path already exists, so reusing
        # one shared path would crash on the second iteration.
        (month_rdd
         .reduceByKey(add)
         .sortBy(lambda kv: kv[0] or '')
         .saveAsTextFile('hdfs://master:9000/user/Analyse/result/' + city))

        # collect() materializes the RDD; printing the RDD object itself only
        # shows its repr, not the aggregated data.
        print(result.collect())
