# Read data from a CSV file
def load_data_csv(spark, path="/root/xxx.csv"):
    """Read a CSV file into a Spark DataFrame.

    Args:
        spark: an active SparkSession (previously an undefined global — now
            passed explicitly, consistent with the other loaders in this file).
        path: CSV file path; defaults to the originally hard-coded location.

    Returns:
        A DataFrame with the header row used for column names and column
        types inferred from the data.
    """
    # Original body mixed a tab (line 3) with spaces (line 4), which raises
    # TabError under Python 3 — normalized to 4-space indentation.
    df = spark.read.csv(path, header=True, inferSchema=True)
    return df

# Read data from Elasticsearch
def load_data_es(spark, esip, database, indexname):
    """Read an Elasticsearch index into a Spark DataFrame.

    Args:
        spark: an active SparkSession.
        esip: Elasticsearch node address.
        database: index name (first part of the es.resource path).
        indexname: type/mapping name (second part of the es.resource path).

    Returns:
        A DataFrame backed by the elasticsearch-hadoop connector.
    """
    # Connector settings gathered in one place; insertion order matches
    # the original chained .option() calls.
    es_conf = {
        "es.nodes": esip,
        "es.port": 7000,
        "es.mapping.date.rich": "false",
        "es.net.http.auth.user": "es",
        "es.net.http.auth.pass": "xxxx",
        "es.resource": database + "/" + indexname,
    }
    reader = spark.read.format("org.elasticsearch.spark.sql")
    for key, value in es_conf.items():
        reader = reader.option(key, value)
    return reader.load()
    
# Read data from MySQL
def load_data_mysql(spark, ip):
    """Read the ald_session_logs table from MySQL into a Spark DataFrame.

    Args:
        spark: an active SparkSession.
        ip: MySQL host address, interpolated into the JDBC URL. The
            original code used the literal string "ip", ignoring this
            parameter entirely — fixed.

    Returns:
        A DataFrame containing all rows of ald_session_logs.
    """
    # Launch shell with the connector on the classpath:
    # bin/pyspark --driver-class-path mysql-connector-java-5.1.40-bin.jar --jars mysql-connector-java-5.1.40-bin.jar
    #
    # Original body was a SyntaxError: several continuation lines lacked
    # trailing backslashes. Parenthesized chaining avoids that class of bug.
    df = (
        spark.read
        .format("jdbc")
        .option("url", "jdbc:mysql://" + ip + "/ald_xinen")
        .option("dbtable", "(select * from ald_session_logs) as df")
        .option("user", "root")
        # NOTE(review): hardcoded credentials — move to config/secret store.
        .option("password", "fish@sky")
        .load()
    )
    return df
