# encoding: utf-8

# author: hufei_neo
# blog: https://blog.csdn.net/hufei_neo
# email: hufei_neo@163.com
# file: tosql.py
# time: 2022-01-13 15:43

%db_python_gx_scb.pyspark
# db_python_gx_scb.pyspark paragraph (Zeppelin interpreter script)

import pandas as pd
from pyspark import SparkContext,SparkConf
from pyspark.sql import HiveContext
from pyspark.sql.types import *

# Export per-subscriber model results from a local CSV into a Hive wide table.
# Flow: read CSV with pandas -> normalize dtypes -> convert to a Spark
# DataFrame with an explicit schema -> expose as a temp table -> CTAS into Hive.
df_merge_1 = pd.read_csv(r'/data2/shlx/df_liw_v1.csv')

# serv_id is an identifier, so force string; resu_1 is a numeric score
# rounded to 3 decimals (vectorized Series.round instead of a Python-level
# apply/round loop).
df_merge_1['serv_id'] = df_merge_1['serv_id'].astype(str)
df_merge_1['resu_1'] = df_merge_1['resu_1'].astype(float).round(3)

## Write the table
df_test = df_merge_1[['serv_id', 'resu_1']]
sqlContext = HiveContext(sc)  # `sc` is pre-created by the Zeppelin pyspark interpreter
# NOTE(review): the schema names the first column "cust_id" while the pandas
# frame supplies "serv_id"; createDataFrame maps columns by position, so the
# Hive table column ends up named cust_id. Confirm this rename is intentional.
schema = StructType([
    StructField("cust_id", StringType(), True),
    StructField("resu_1", DoubleType(), True),
])
sdf = sqlContext.createDataFrame(df_test, schema)
sdf.registerTempTable("temp_test")
### Insert into an already-existing table:
# sqlContext.sql("insert overwrite table idealsh.df_liwang_month_12_v1 select * from temp_test")
### Create a new table and populate it. HiveQL CTAS requires the AS keyword
### ("CREATE TABLE ... AS SELECT ..."); without it the statement fails to parse.
sqlContext.sql("create table idealsh.df_liwang_month_12_v1 as select * from temp_test")  # load wide table df_liwang_month_12_v1 onto the big-data platform