# coding=utf-8
# Docker registry used to store the built training image.
# NOTE(review): images built with Python on Windows could be pulled by `docker`
# but failed under containerd with a manifest-not-found error, e.g.:
# Failed to pull image "ai.internal.local:30082/library/fairing-train:846E5A91": rpc error: code = NotFound desc = failed to pull and unpack image "ai.internal.local:30082/library/fairing-train:846E5A91": failed to copy: httpReadSeeker: failed open: content at http://192.168.75.13:30082/v2/library/fairing-train/manifests/sha256:641b9299575e6319f282bbef74611e5a7a150f4cee061b461a62f582853234b5?ns=ai.internal.local%!A(MISSING)30082 not found: not found
# Ensure you have permission for pushing docker image requests.
DOCKER_REGISTRY = 'ai.internal.local:30082'
# Target namespace. Note that the created PVC must live in this namespace.
my_namespace = 'wangjiarong-workspace-example-com'
# You can also get the default target namespace using the API below:
#namespace = fairing_utils.get_default_target_namespace()
# For distributed training the PVC must be accessible from all nodes in the
# cluster; this example creates an NFS-backed PV to satisfy that.
nfs_server = '192.168.75.14'
nfs_path = '/data/nfsroot/wangjiarong-workspace-example-com/data/mnist'
pv_name = 'kubeflow-mnist'
pvc_name = 'mnist-pvc'
# Local kubeconfig used when the script runs outside the cluster.
configFile = "admin.conf"
currentContext = "kubernetes-admin@kubernetes"

import os

# Decide where kubectl-style configuration comes from before importing fairing
# (fairing reads KUBECONFIG at import time).
# The service-account directory only exists inside a pod.
if os.path.isdir('/var/run/secrets/kubernetes.io/'):
    # Running in-cluster: default KUBECONFIG to the conventional location.
    # NOTE(review): in-cluster clients normally use the service account and do
    # not need KUBECONFIG at all — confirm this default is actually consumed.
    # `setdefault` never overwrites a caller-provided value.
    os.environ.setdefault('KUBECONFIG', '~/.kube/config')
else:
    # Running locally: use the bundled admin kubeconfig, and look for docker
    # registry credentials (config.json) in the current working directory.
    os.environ.setdefault('KUBECONFIG', configFile)
    os.environ.setdefault('DOCKER_CONFIG', os.getcwd())
    

from kubeflow.fairing.utils import is_running_in_k8s #this will set KUBE_CONFIG_DEFAULT_LOCATION
    
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
import yaml

# PersistentVolume manifest: a cluster-scoped, NFS-backed 10Gi volume with
# ReadWriteMany access so every node in the cluster can mount it.
pv_yaml = '''
apiVersion: v1
kind: PersistentVolume
metadata:
  name: {pv_name}
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: {nfs_path}
    server: {nfs_server}
'''.format(pv_name=pv_name, nfs_path=nfs_path, nfs_server=nfs_server)

# PersistentVolumeClaim manifest in the target namespace. The empty
# storageClassName forces binding to the statically provisioned PV above
# instead of triggering dynamic provisioning.
pvc_yaml = '''
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {pvc_name}
  namespace: {my_namespace}
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 10Gi
'''.format(pvc_name=pvc_name, my_namespace=my_namespace)

# Load the appropriate Kubernetes client configuration: the mounted service
# account when running inside a pod, otherwise the kubeconfig file pointed to
# by KUBECONFIG (set during bootstrap above).
if is_running_in_k8s():
    k8s_config.load_incluster_config()
else:
    k8s_config.load_kube_config()  # could also pass (configFile, currentContext)

k8s_core_api = k8s_client.CoreV1Api()

# Create the PV only if one with this name does not already exist.
pvs = k8s_core_api.list_persistent_volume(
    field_selector=f"metadata.name={pv_name}")
if not pvs.items:
    k8s_core_api.create_persistent_volume(yaml.safe_load(pv_yaml))

# Create the PVC only if it does not already exist in the target namespace.
pvcs = k8s_core_api.list_persistent_volume_claim_for_all_namespaces(
    field_selector=f"metadata.name={pvc_name},metadata.namespace={my_namespace}")
if not pvcs.items:
    k8s_core_api.create_namespaced_persistent_volume_claim(
        my_namespace, yaml.safe_load(pvc_yaml))

num_chief = 1  # number of Chief replicas in the TFJob
num_ps = 1  # number of PS (parameter server) replicas in the TFJob
num_workers = 2  # number of Worker replicas in the TFJob
model_dir = "/mnt"  # mount path of the PVC inside the training pods
export_path = "/mnt/export"  # where the trained model will be exported
train_steps = "1000"
batch_size = "100"
learning_rate = "0.01"

import uuid
from kubeflow import fairing
from kubeflow.fairing.kubernetes.utils import mounting_pvc


# Random 4-hex suffix so repeated launches do not collide on job name.
tfjob_name = f'mnist-training-{uuid.uuid4().hex[:4]}'
# TODO(review): the dataset's default directory is unresolved — confirm where
# the training data is expected to live and wire it up.


# AppendBuilder: appends the function's layer onto the base image and pushes
# the result to DOCKER_REGISTRY.
# (signature: registry=None, image_name=..., base_image=..., push=True,
#  preprocessor=None)
fairing.config.set_builder('append',
                           image_name="library/fairing-train",
                           base_image='tensorflow/tensorflow:1.15.2-py3',
                           registry=DOCKER_REGISTRY)  # , namespace=my_namespace)

from kubeflow.fairing.kubernetes.utils import volume_mounts

# Job deployer. NOTE(review): fairing's docker_http helper picks plain http
# for registries matching r'.*\.local(?:host)?(?::\d{1,5})?$' and reads the
# registry password from config.json (see DOCKER_CONFIG set above).
# (signature: namespace=None, runs=1, output=None, cleanup=True, labels=None,
#  job_name=None, stream_log=True, deployer_type=constants.JOB_DEPLOYER_TYPE,
#  pod_spec_mutators=None, annotations=None, config_file=None, context=None,
#  client_configuration=None, persist_config=True)
_deployer_kwargs = dict(
    name='job',
    namespace=my_namespace,
    stream_log=False,
    job_name=tfjob_name,
    pod_spec_mutators=[mounting_pvc(pvc_name=pvc_name, pvc_mount_path=model_dir)],
)
if not is_running_in_k8s():
    # Outside the cluster the deployer needs an explicit kubeconfig file.
    _deployer_kwargs['config_file'] = configFile  # context_source=currentContext
fairing.config.set_deployer(**_deployer_kwargs)
def train():
    """Placeholder training entry point executed remotely in the built image."""
    greeting = "hello world!"
    print(greeting)


# Wrap train() with fairing: this builds and pushes the image via the
# configured append builder, then calling it launches the Job through the
# configured deployer.
remote_train = fairing.config.fn(train)
remote_train()
