import pyspark
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import Row
# Spark 2.x+ entry point: build one SparkSession and take the SparkContext
# from it, instead of the legacy two-step bootstrap (manual SparkContext
# followed by a separate SparkSession.builder.getOrCreate()).
sqlContext = SparkSession.builder.master("local").appName("duty 2").getOrCreate()
sc = sqlContext.sparkContext

# Load both CSV files and split each line into its comma-separated fields.
# NOTE(review): naive str.split(",") assumes no header row and no quoted
# commas in either file — confirm against the actual data.
stockrdd = sc.textFile('file:///home/allen/myspark/stock_small.csv') \
    .map(lambda line: line.split(","))
divrdd = sc.textFile('file:///home/allen/myspark/dividends_small.csv') \
    .map(lambda line: line.split(","))
# Column layout of stock_small.csv, in file order (same fields, same order,
# as the original keyword-by-keyword Row construction).
_STOCK_FIELDS = (
    "exchange", "stock_symbol", "date", "open",
    "high", "low", "close", "volume", "close_adj",
)

# Wrap each split record in a Row so createDataFrame can infer the schema.
stockrows = stockrdd.map(lambda rec: Row(**dict(zip(_STOCK_FIELDS, rec))))
stockdf = sqlContext.createDataFrame(stockrows)
# Column layout of dividends_small.csv, in file order.
_DIV_FIELDS = ("exchange", "symbol", "date", "dividend")

# Wrap each split record in a Row so createDataFrame can infer the schema.
divrows = divrdd.map(lambda rec: Row(**dict(zip(_DIV_FIELDS, rec))))
divdf = sqlContext.createDataFrame(divrows)
# Expose both DataFrames as SQL temp views for the queries below.
# registerTempTable() has been deprecated since Spark 2.0;
# createOrReplaceTempView() is the supported replacement.
stockdf.createOrReplaceTempView("stocksm")
divdf.createOrReplaceTempView("div")
# Question 1: IBM closing prices on the dates on which IBM paid a dividend
# (the IN subquery restricts stock dates to dividend dates).
ibm_dividend_close_sql = "SELECT date,stock_symbol,close \
    FROM stocksm \
    WHERE stock_symbol = 'IBM' AND \
    date IN (SELECT date FROM div WHERE symbol = 'IBM')"
sqlContext.sql(ibm_dividend_close_sql).show(100)
print('------------the result of question1,duty 2--------------------')

# Question 2: yearly average adjusted close for AAPL, keeping only the years
# whose average exceeds 50, in chronological order.
# Fixed: the original grouped and filtered on SELECT aliases
# ("GROUP BY year", "HAVING stock_price_adj_close>50"), which is nonstandard
# SQL and only resolves through Spark's alias-resolution fallbacks; the
# explicit expressions below are valid everywhere and give the same result.
# NOTE(review): close_adj is parsed as a string, so avg() relies on Spark's
# implicit cast to double — confirm the column is always numeric.
sqlContext.sql("SELECT year(date) year, avg(close_adj) stock_price_adj_close \
    FROM stocksm \
    WHERE stock_symbol = 'AAPL' \
    GROUP BY year(date) \
    HAVING avg(close_adj) > 50 \
    ORDER BY year"
    ).show()
print('------------the result of question2,duty 2--------------------')