from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from pyspark.sql.session import SparkSession

# from kafka import KafkaProducer
import pymysql
import os
import json

from pyspark import SparkConf,SparkContext
from pyspark.sql import SQLContext,HiveContext
from pyspark.sql.types import *


# Point PySpark at the JVM installation.
# NOTE(review): JAVA_HOME conventionally refers to the JDK root directory,
# not its bin/ subdirectory — confirm "/usr/local/java/bin" is right for this host.
os.environ['JAVA_HOME'] = "/usr/local/java/bin"
# os.environ['PYTHON_HOME'] ="/usr/local/Python3.7/python"


def printTemp(iter):
    """Insert every raw GPS record in *iter* into MySQL.

    Intended as a ``foreachPartition`` callback: one DB connection is opened
    per partition and reused for all rows in it.

    Each record is a fixed-width string sliced as:
    [0:10] timestamp, [10:14] u_id, [14:23] lat, [23:32] lng,
    [32:42] gps_time, [42:45] angle (jiaodu).

    :param iter: iterable of fixed-width record strings
                 (name kept for caller compatibility; shadows the builtin).
    """
    # Open the database connection once per partition, not per record.
    db = pymysql.connect(host='192.168.0.221', user='root', password='1qaz@WSXcomit', port=3306, db='xiagang-test')
    try:
        for rec in iter:
            insert_to_db(db, rec[0:10], str(rec[10:14]), rec[14:23], rec[23:32], rec[32:42], str(rec[42:45]))
    finally:
        # Close even when an insert raises — the original leaked the
        # connection on any exception.
        db.close()


def insert_to_db(db, rec0, rec1, rec2, rec3, rec4, rec5):
    """Insert one GPS record into ``xiagang_datatest``.

    :param db:   open pymysql connection
    :param rec0: timestamp slice of the raw record
    :param rec1: u_id
    :param rec2: lat
    :param rec3: lng
    :param rec4: gps_time
    :param rec5: jiaodu (angle/bearing)
    :raises: re-raises any database error after rolling back.
    """
    # Parameterized query: the original interpolated values with %-formatting,
    # which breaks on quotes in the data and is SQL-injection prone.
    sql = ("INSERT INTO xiagang_datatest"
           "(timestamp,u_id,lat,lng,gps_time,jiaodu)"
           " VALUES (%s,%s,%s,%s,%s,%s)")
    try:
        # Context-managed cursor so it is always closed.
        with db.cursor() as cursor:
            cursor.execute(sql, (rec0, rec1, rec2, rec3, rec4, rec5))
        db.commit()
    except Exception:
        # Keep the connection usable for subsequent records, then propagate.
        db.rollback()
        raise

def update_data(iter):
    """Update existing rows in MySQL from the raw GPS records in *iter*.

    Intended as a ``foreachPartition`` callback; mirrors :func:`printTemp`
    but issues UPDATEs keyed on u_id instead of INSERTs.

    :param iter: iterable of fixed-width record strings
                 (name kept for caller compatibility; shadows the builtin).
    """
    # One connection per partition.
    db = pymysql.connect(host='192.168.0.221', user='root', password='1qaz@WSXcomit', port=3306, db='xiagang-test')
    try:
        for rec in iter:
            # [42:45] jiaodu, [14:23] lat, [10:14] u_id
            update_to_db(db, str(rec[42:45]), rec[14:23], rec[10:14])
    finally:
        # Close even on failure — the original leaked the connection
        # whenever an update raised.
        db.close()


def update_to_db(db, rec0, rec1, rec2):
    """Update jiaodu and lat for the row whose u_id matches *rec2*.

    :param db:   open pymysql connection
    :param rec0: jiaodu (angle) value
    :param rec1: lat value
    :param rec2: u_id key
    :raises: re-raises any database error after rolling back.
    """
    # Parameterized query: the original %-formatted the SQL and quoted only
    # u_id ('%s') while leaving jiaodu/lat unquoted — injection-prone and
    # fragile for non-numeric values. The driver now handles all quoting.
    sql = "UPDATE xiagang_datatest SET jiaodu=%s,lat=%s where u_id=%s"
    print(sql)  # debug: statement template (values are bound by the driver)
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, (rec0, rec1, rec2))
        db.commit()
    except Exception:
        db.rollback()
        raise

# Send data to Kafka
# def send_data(bootstrap_servers, topic_name, data):
#     producer = KafkaProducer(bootstrap_servers=bootstrap_servers, value_serializer=lambda v: json.dumps(v).encode('utf-8'))
#     producer.send(topic_name, data)
#     producer.close()

# Wrap the record fields into a JSON-style dict
def create_json(lines):
    """Split the record stream into one field-stream per column.

    Maps *lines* six times, each time slicing out one fixed-width field,
    and collects the resulting streams in a dict keyed by field name.

    NOTE(review): the dict is handed to printTemp(), which iterates its
    argument and slices each element — iterating a dict yields its string
    keys, so this call looks suspicious; confirm the intent (the only
    caller of this function is currently commented out).

    :param lines: DStream of fixed-width record strings
    :return: dict mapping field name -> DStream of that field's slice
    """
    # (field name, start offset, end offset) for each fixed-width column.
    fields = (
        ("timestamp", 0, 10),
        ("u_id", 10, 14),
        ("lat", 14, 23),
        ("lng", 23, 32),
        ("gps_time", 32, 42),
        ("angle", 42, 45),
    )
    # Bind lo/hi as defaults so each lambda captures its own slice bounds.
    line_json = {
        name: lines.map(lambda rec, lo=lo, hi=hi: rec[lo:hi])
        for name, lo, hi in fields
    }
    printTemp(line_json)
    return line_json




if __name__ == '__main__':

    # Kafka topic to consume from.
    topic="test0210"

    # Create the Spark session / streaming context (3-second micro-batches).
    print("创建spark")
    spark = SparkSession.builder.master("local[2]").getOrCreate()
    sc = spark.sparkContext
    ssc = StreamingContext(sc,3)

    # NOTE(review): sqlContext is created but never used below — candidate
    # for removal.
    sqlContext = SQLContext(sc)

    # Create a direct connection, specifying the topic and broker address.
    # NOTE(review): KafkaUtils.createDirectStream was removed in Spark 3.x;
    # this code requires Spark 2.x with the spark-streaming-kafka package —
    # confirm the deployment's Spark version.
    ks = KafkaUtils.createDirectStream(ssc,[topic],{"metadata.broker.list":"192.168.0.221:9092"})
    # Each element is a (key, value) pair — key is None here.
    print("================kafka连接成功=====================")
    ks.pprint()

    # Each transformation below prints its output once per batch.
    # Keep only the message value from each (key, value) pair.
    lines = ks.map(lambda x:x[1])
    lines.pprint()

    # Insert the records: one DB connection per partition via printTemp.
    lines.foreachRDD(lambda rdd: rdd.foreachPartition(printTemp))
    # Update existing records instead (alternative path, disabled):
    # lines.foreachRDD(lambda rdd: rdd.foreachPartition(update_data))

    # Re-wrap the received stream as JSON and send it on (disabled):
    # line_json = create_json(lines)



    ssc.start()
    # Block until the streaming computation terminates.
    ssc.awaitTermination()