from __future__ import annotations

import random, sys, os, json, math, time
import pyspark
from pyspark.rdd import RDD
from pyspark import SparkContext, SparkConf
import findspark
from typing import Any


"""
    平常使用spark处理数据不需要其他功能，只需要map一个。
    
    为了使用spark，需要先findspark。
    在findspark的帮助下，提供

"""


# Point findspark at the unpacked Spark distribution (spark_home) and at the
# Python interpreter the executors should use (python_path).

class SparkManager:
    """Singleton wrapper around a SparkContext for simple RDD map/collect jobs.

    Only one instance (and therefore one SparkContext) ever exists per
    process; repeated constructions return the same object.
    """

    # The unique shared instance.
    _singleton: SparkManager | None = None

    # Current working RDD; None until adding_datas() loads a dataset.
    _rdd: RDD | None = None

    def __new__(cls: type[SparkManager], *args: Any, **kwargs: Any) -> SparkManager:
        """Create (or return) the unique SparkManager instance.

        Bug fix: the original signature ``__new__(cls)`` rejected the
        constructor arguments that ``type.__call__`` forwards, so any
        ``SparkManager(spark_home, python_path)`` call raised TypeError.
        """
        if cls._singleton is None:
            cls._singleton = super().__new__(cls)
        return cls._singleton

    def __init__(self,
                 spark_home: str | None = None,
                 python_path: str | None = None,
                 app_name: str = 'rdd_tutorial',
                 master: str = 'local[16]'
                 ) -> None:
        """Locate Spark via findspark and start the one SparkContext.

        Args:
            spark_home: Spark installation directory. None lets findspark
                fall back to the SPARK_HOME environment variable
                (backward-compatible generalization: these used to be
                required positional arguments).
            python_path: Python interpreter path for Spark; None lets
                findspark choose its default.
            app_name: Spark application name.
            master: Spark master URL; defaults to 16 local threads.
        """
        # Because this is a singleton, __init__ runs again on every
        # SparkManager(...) call; without this guard a second SparkContext
        # would be created, which pyspark forbids.
        if getattr(self, '_initialized', False):
            return
        findspark.init(spark_home, python_path)

        self.conf: SparkConf = SparkConf().setAppName(app_name).setMaster(master)
        self.sc: SparkContext = SparkContext(conf=self.conf)
        self._initialized: bool = True

    def adding_datas(self,
                     datas: list,
                     minPartitions: int = 5) -> None:
        """Load `datas` into a fresh RDD with at least `minPartitions` partitions."""
        self._rdd = self.sc.parallelize(datas, minPartitions)

    def map(self,
            func) -> None:
        """Lazily apply `func` element-wise to the current RDD.

        Raises:
            RuntimeError: if no dataset has been loaded yet (previously this
                surfaced as an opaque AttributeError on None).
        """
        if self._rdd is None:
            raise RuntimeError("No RDD loaded; call adding_datas() first.")
        self._rdd = self._rdd.map(func)

    def run(self,
            func) -> list:
        """Apply `func` to the current RDD and collect the results to the driver.

        Note: collect() materializes the whole result in driver memory.

        Raises:
            RuntimeError: if no dataset has been loaded yet.
        """
        if self._rdd is None:
            raise RuntimeError("No RDD loaded; call adding_datas() first.")
        return self._rdd.map(func).collect()

    def reset(self) -> None:
        """Discard the current RDD so a new dataset can be loaded."""
        self._rdd = None
        

s = SparkManager()