# Docker registry where the built training image is pushed.
# You need push permission on this registry.
DOCKER_REGISTRY = 'ai.internal:30082'
# Nexus proxy used as a pull-through registry mirror.
NEXUS_PROXY_MIRROR = "ai.internal:30092"

# Target namespace; the PVC created below must live in this same namespace.
# (Alternatively: namespace = fairing_utils.get_default_target_namespace())
my_namespace = 'wangjiarong-workspace-example-com'

# For distributed training the PVC must be reachable from every node in the
# cluster, so this example backs it with an NFS PersistentVolume.
nfs_server = '192.168.75.14'
nfs_path = '/data/nfsroot/wangjiarong-workspace-example-com/data/mnist'
pv_name = 'kubeflow-mnist'
pvc_name = 'mnist-pvc'

# Kubeconfig file and context used when running outside the cluster.
configFile = "admin.conf"
currentContext = "kubernetes-admin@kubernetes"

import os

# Provide a default KUBECONFIG only when the variable is not already set.
# os.environ.setdefault leaves an existing value untouched by itself, so the
# original inner os.environ.get('KUBECONFIG', ...) was redundant and is gone.
if os.path.isdir('/var/run/secrets/kubernetes.io/'):
    # Service-account secrets are mounted: we are likely inside a pod.
    # NOTE(review): '~' is not expanded by the environment variable itself —
    # confirm the kube config loader calls expanduser on this path.
    os.environ.setdefault('KUBECONFIG', '~/.kube/config')
else:
    # Outside the cluster: fall back to the bundled admin kubeconfig.
    os.environ.setdefault('KUBECONFIG', configFile)

from kubeflow.fairing.utils import is_running_in_k8s #this will set KUBE_CONFIG_DEFAULT_LOCATION
    
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
import yaml

# PersistentVolume backed by the NFS export. PVs are cluster-scoped, so no
# namespace appears in the manifest.
pv_yaml = f'''
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {pv_name}
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: {nfs_path}
    server: {nfs_server}
'''
# PersistentVolumeClaim in the training namespace. storageClassName is ""
# (empty, not unset) to disable dynamic provisioning; volumeName pins the
# claim to the NFS PV defined above so it cannot bind to some other PV that
# happens to match the size/access-mode request.
pvc_yaml = f'''
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {pvc_name}
  namespace: {my_namespace}
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  volumeName: {pv_name}
  resources:
    requests:
      storage: 10Gi
'''

# Load cluster credentials: the in-cluster service account when running
# inside Kubernetes, otherwise the kubeconfig pointed to by KUBECONFIG.
if is_running_in_k8s():
    k8s_config.load_incluster_config()
else:
    k8s_config.load_kube_config()  # could also pass (configFile, currentContext)

k8s_core_api = k8s_client.CoreV1Api()

# Create the PV and PVC only if they do not exist yet, so re-runs are
# idempotent. NOTE(review): list-then-create is racy if two copies of this
# script run concurrently; catching a 409 ApiException on create would be
# airtight.
pvs = k8s_core_api.list_persistent_volume(
    field_selector=f"metadata.name={pv_name}")
if not pvs.items:
    k8s_core_api.create_persistent_volume(yaml.safe_load(pv_yaml))

pvcs = k8s_core_api.list_persistent_volume_claim_for_all_namespaces(
    field_selector=f"metadata.name={pvc_name},metadata.namespace={my_namespace}")
if not pvcs.items:
    k8s_core_api.create_namespaced_persistent_volume_claim(
        my_namespace, yaml.safe_load(pvc_yaml))

num_chief = 1 # number of Chief replicas in the TFJob
num_ps = 1  # number of PS (parameter server) replicas in the TFJob
num_workers = 2  # number of Worker replicas in the TFJob
# Paths inside the training pods; the shared PVC is mounted at model_dir.
model_dir = "/mnt"
export_path = "/mnt/export" 
# Hyper-parameters are kept as strings: they are concatenated into CLI flags.
train_steps = "1000"
batch_size = "100"
learning_rate = "0.01"

import uuid
from kubeflow import fairing   
from kubeflow.fairing.kubernetes.utils import mounting_pvc


# Unique TFJob name: a 4-hex-char random suffix avoids collisions between runs.
tfjob_name = 'mnist-training-' + uuid.uuid4().hex[:4]

# Files staged into the builder context: local path -> path in the context.
# The MNIST archives live under data/ locally but are flattened in the image.
# NOTE(review): confirm where the training script expects the dataset to be.
_mnist_archives = (
    "t10k-images-idx3-ubyte.gz",
    "t10k-labels-idx1-ubyte.gz",
    "train-images-idx3-ubyte.gz",
    "train-labels-idx1-ubyte.gz",
)
output_map = {"Dockerfile": "Dockerfile", "mnist.py": "mnist.py"}
output_map.update({f"data/{archive}": archive for archive in _mnist_archives})

def getMimioContext():
    """Build a Minio (S3-compatible) context source for the cluster builder.

    Connection settings can be overridden via environment variables
    (MINIO_ENDPOINT_URL, MINIO_ACCESS_KEY, MINIO_SECRET_KEY, MINIO_REGION,
    MINIO_BUCKET). The previous hard-coded values remain the defaults, so
    behavior is unchanged when the variables are unset.

    Returns:
        myMinioContextSource: a context source pointing at the Minio
        endpoint, with base-image pulls proxied through NEXUS_PROXY_MIRROR.
    """
    # SECURITY: default credentials are hard-coded demo values; set the
    # environment variables instead of relying on them in real deployments.
    s3_endpoint_url = os.environ.get('MINIO_ENDPOINT_URL', 'http://mimio.ai.internal.cn')
    s3_secret_id = os.environ.get('MINIO_ACCESS_KEY', 'minio')
    s3_secret_key = os.environ.get('MINIO_SECRET_KEY', 'minio123')
    s3_region = os.environ.get('MINIO_REGION', 'test')
    # Bucket must already exist (Minio browser: "+" -> enter name -> Enter).
    # NOTE(review): s3_bucket_name is never passed to myMinioContextSource —
    # confirm whether the constructor should receive it.
    s3_bucket_name = os.environ.get('MINIO_BUCKET', 'kubeflow-test')
    from myMinio import myMinioContextSource
    minio_context_source = myMinioContextSource(
        endpoint_url=s3_endpoint_url,
        minio_secret=s3_secret_id,
        minio_secret_key=s3_secret_key,
        region_name=s3_region,
        registry_mirror=NEXUS_PROXY_MIRROR)
    return minio_context_source
# Container entrypoint for each TFJob replica: run the training script with
# the hyper-parameters passed as CLI flags (the values are already strings).
command = [
    "python",
    "/opt/mnist.py",  # script path inside the built image (see Dockerfile)
    f"--tf-model-dir={model_dir}",
    f"--tf-export-dir={export_path}",
    f"--tf-train-steps={train_steps}",
    f"--tf-batch-size={batch_size}",
    f"--tf-learning-rate={learning_rate}",
]

# Stage the files in output_map and run `command` as the job entrypoint.
fairing.config.set_preprocessor('python', command=command, path_prefix="/app", output_map=output_map)

# Build the training image in-cluster and push it to DOCKER_REGISTRY, using
# the Minio context source for the build context.
# ClusterBuilder signature, for reference:
#   registry=None, image_name=..., context_source=None, preprocessor=None,
#   push=True, base_image=..., pod_spec_mutators=None, namespace=None,
#   dockerfile_path=None, cleanup=False
fairing.config.set_builder(name='cluster', registry=DOCKER_REGISTRY, base_image="",
                           namespace=my_namespace, image_name="library/mnist",
                           dockerfile_path="Dockerfile", context_source=getMimioContext())

# Deploy as a distributed TFJob (chief/PS/workers) with the shared NFS PVC
# mounted at model_dir in every replica. The two original call sites differed
# only in config_file, so the common arguments are built once.
deployer_kwargs = dict(
    name='tfjob',
    namespace=my_namespace,
    stream_log=False,
    job_name=tfjob_name,
    chief_count=num_chief,
    worker_count=num_workers,
    ps_count=num_ps,
    pod_spec_mutators=[mounting_pvc(pvc_name=pvc_name, pvc_mount_path=model_dir)],
)
if not is_running_in_k8s():
    # Outside the cluster the deployer needs an explicit kubeconfig.
    deployer_kwargs['config_file'] = configFile
fairing.config.set_deployer(**deployer_kwargs)

fairing.config.run()