import re

import pymongo
from elasticsearch import Elasticsearch
from pyspark import SparkConf, SparkContext
from pyspark.sql.session import SparkSession

# Spark setup: local single-node master; app name mirrors the SparkSession below.
conf = SparkConf().setMaster("local").setAppName("DataTransform-info")
sc = SparkContext(conf=conf)
spark = SparkSession.builder.appName('DataTransform-info').getOrCreate()
# Source collection handle: database 'movie', collection 'movieInfo'.
# NOTE(review): unauthenticated, hard-coded public MongoDB host — move the URI
# to configuration/env before reuse.
conn = pymongo.MongoClient("mongodb://47.93.220.108:27017")['movie']['movieInfo']
# Patterns hoisted out of the loop and compiled once (the original recompiled
# and *ran* each findall twice per field per document).
_DECIMAL_RE = re.compile(r'[0-9]+\.[0-9]+')    # strictly "NN.NN", e.g. "12.3%"
_NUMBER_RE = re.compile(r'[0-9]+\.?[0-9]*')    # also plain integers, e.g. "8"


def _first_number(value, pattern=_DECIMAL_RE):
    """Return the first number *pattern* finds in *value* as a float, else 0.0.

    *value* is coerced to str first, so an already-numeric field cannot crash
    the scan (the original code only protected `attendance` this way).
    """
    match = pattern.search(str(value))
    return float(match.group()) if match else 0.0


# Normalise every raw Mongo document into numeric fields before building the
# Spark DataFrame. NOTE(review): patterns assume the raw fields are strings
# scraped from a box-office site, e.g. "12.3%", "8.5万" — confirm upstream.
ll = []
for doc in conn.find({}, {'_id': 0}):
    doc['arrange_percentage'] = _first_number(doc['arrange_percentage'])
    doc['attendance'] = _first_number(doc['attendance'])
    # Box office is scaled ×10000. BUGFIX: the original regex required at
    # least two digits, so a single-digit gross like "8" matched nothing and
    # became 0; _NUMBER_RE also accepts integers.
    doc['boxOffice'] = _first_number(doc['boxOffice'], _NUMBER_RE) * 10000
    doc['boxOffice_percentage'] = _first_number(doc['boxOffice_percentage'])
    doc['arrange'] = int(doc['arrange'])
    ll.append(doc)
# Hand the cleaned rows to Spark: distribute them as an RDD and let the
# session infer a DataFrame schema from the dict records.
df = spark.createDataFrame(sc.parallelize(ll))
# Elasticsearch client. NOTE(review): the `from elasticsearch import
# Elasticsearch` line originally sat here, mid-file; it now lives in the
# top-of-file import block per PEP 8. Host is hard-coded — move to config.
es = Elasticsearch(hosts='106.13.117.37', port='9200')

# Push every DataFrame row into the 'movieinfo' index. collect() pulls the
# whole dataset to the driver and rows are indexed one HTTP call at a time —
# acceptable at this scale; switch to elasticsearch.helpers.bulk if it grows.
for row in df.collect():
    data = row.asDict()
    # Duplicate the title under a dedicated key used for lookups downstream.
    data['key_movie'] = data['movie_name']
    es.index('movieinfo', body=data)

