# import sys
# from operator import add
#
# from pyspark.sql import SparkSession
#
#
# if __name__ == "__main__":
#
#     spark = SparkSession.builder.appName("PythonWordCount").getOrCreate()
#
#     lines = spark.read.text("obs://obs.dc.mrs.cqvip.com/input/suhong/input/").rdd.map(lambda r: r[0])
#     counts = lines.flatMap(lambda x: x.split(' ')).map(lambda x: (x, 1)).reduceByKey(add)
#     counts.saveAsTextFile("obs://obs.dc.mrs.cqvip.com/input/suhong/test")
#
#     spark.stop()
import sys

from pyspark.sql import SparkSession


def contains(str, substr):
    """Return True if *substr* occurs anywhere in *str*, else False.

    Thin wrapper around the ``in`` operator, used as a picklable predicate
    in the RDD ``filter`` below.

    NOTE: the parameter name ``str`` shadows the builtin; kept as-is so the
    call interface (including keyword calls) stays unchanged.
    """
    return substr in str


if __name__ == "__main__":
    # Require the input path as the first command-line argument.
    if len(sys.argv) < 2:
        print("Usage: CollectFemaleInfo <file>")
        # sys.exit is always available; the bare exit() helper is injected
        # by the `site` module and is not guaranteed in every runtime.
        sys.exit(-1)

    spark = SparkSession \
        .builder \
        .appName("CollectFemaleInfo") \
        .getOrCreate()

    # Pipeline overview:
    # 1. Read the raw text data from the path given in argv[1] (text/rdd).
    # 2. Keep only lines that mention female users (filter).
    # 3. Sum the online time per female user (map / map / reduceByKey).
    #    Input lines are assumed to be CSV: name,gender,minutes — TODO confirm
    #    against the actual data files.
    # 4. Keep only users whose total time exceeds 120 minutes (filter).
    inputPath = sys.argv[1]
    result = spark.read.text(inputPath).rdd.map(lambda r: r[0]) \
        .filter(lambda line: contains(line, "female")) \
        .map(lambda line: line.split(',')) \
        .map(lambda dataArr: (dataArr[0], int(dataArr[2]))) \
        .reduceByKey(lambda v1, v2: v1 + v2) \
        .filter(lambda tupleVal: tupleVal[1] > 120) \
        .collect()

    # Print each qualifying user as "name,total_minutes".
    for (k, v) in result:
        print(k + "," + str(v))

    # Stop the Spark session (and its underlying SparkContext).
    spark.stop()
