# 此接口用于保存并上传任意dicom文件夹到Orthanc服务器上
import zipfile
import pydicom
import requests
import os
from requests.auth import HTTPBasicAuth
import random
import time
from pyorthanc import find, Orthanc
# Module-level Orthanc REST client shared by upload_slice() / delete_series();
# credentials match the HTTPBasicAuth values used for the raw requests calls below.
orthanc = Orthanc('http://localhost:8042',
                  username='orthanc', password='orthanc')

from Flask_run import db
from database.models.mri import Mri
from database.models.patient import Patient
from database.models.study_id import Study_id

# Maps an Orthanc study id (instance 'ParentStudy') to its DICOM
# StudyInstanceUID for every study uploaded during this run; filled by
# UploadBuffer() and consumed by upload_slice().
# NOTE: the original `global MAPPING_TABLE` statement was a no-op at module
# level and has been removed.
MAPPING_TABLE = {}

def SaveFile(file, name):
    """Persist an uploaded file under ./Dicom_repository.

    Args:
        file: uploaded file object exposing a ``save(path)`` method
              (e.g. a Flask ``FileStorage``); falsy values are ignored.
        name: original filename, appended to a time-based prefix so that
              near-simultaneous uploads do not collide.

    Returns:
        The path the file was saved to, or None when ``file`` is falsy.
    """
    if not file:  # nothing was uploaded
        return None
    source_path = './Dicom_repository'
    # makedirs(exist_ok=True) avoids the check-then-create race of isdir+mkdir
    os.makedirs(source_path, exist_ok=True)
    # timestamp + gaussian jitter keeps filenames unique across concurrent uploads
    filename = str(time.time() + random.gauss(5, 1)) + name
    savepath = os.path.join(source_path, filename)
    file.save(savepath)
    return savepath

def UploadBuffer(dicom):
    """Upload one DICOM instance (raw bytes) to Orthanc and mirror its
    metadata into the MySQL tables (Patient / Mri / Study_id).

    Args:
        dicom: raw bytes of a single DICOM file.

    Side effects:
        - POSTs the instance to the local Orthanc server; silently skips
          payloads Orthanc rejects as non-DICOM.
        - Records the Orthanc study id -> StudyInstanceUID pair in
          MAPPING_TABLE for the later slice-count pass (upload_slice).
        - Inserts Patient / Mri / Study_id rows when missing and marks
          every Mri row of this study as available.
    """
    url = 'http://localhost:8042'
    auth = HTTPBasicAuth('orthanc', 'orthanc')

    r = requests.post('%s/instances' % url, auth=auth, data=dicom)
    try:
        # only catch genuine HTTP failures instead of a bare except that
        # would also swallow KeyboardInterrupt and programming errors
        r.raise_for_status()
    except requests.HTTPError:
        print('  not a valid DICOM file, ignoring it')
        return

    info = r.json()
    # upload finished -- fetch the parsed tags back from Orthanc
    r2 = requests.get('%s/instances/%s/tags?short' % (url, info['ID']),
                      auth=auth)
    r2.raise_for_status()
    tags = r2.json()

    # standard DICOM tag numbers -> readable names
    name = tags['0010,0010']        # PatientName
    sex = tags['0010,0040']         # PatientSex
    borndate = tags['0010,0030']    # PatientBirthDate (YYYYMMDD)
    fid = tags['0008,0050']         # AccessionNumber ("F number")
    pid = tags['0010,0020']         # PatientID
    study_id = tags['0020,000d']    # StudyInstanceUID
    check_date = tags['0008,0020']  # StudyDate (YYYYMMDD)
    hospital = tags['0008,0080']    # InstitutionName
    device = tags['0008,0070']      # Manufacturer
    modality = tags['0008,0060']    # Modality
    orthanc_id = info['ParentStudy']

    # age in whole years from the YYYY prefixes of the two dates
    # (assumes both tags are populated -- TODO confirm against the scanner data)
    age = int(check_date[:4]) - int(borndate[:4])

    # remember this study's id pair for upload_slice()
    MAPPING_TABLE[orthanc_id] = study_id

    # create the patient row on first sight
    patient = Patient.query.filter(Patient.pid == pid).first()
    if patient is None:
        newpatient = Patient(name, sex, age, pid, hospital, borndate)
        print('病人信息更新', newpatient)
        db.session.add(newpatient)
        db.session.commit()
        patient = Patient.query.filter(Patient.pid == pid).first()

    # BUGFIX: the original ran Patient.query.filter(Mri.fid == fid), which
    # queried the wrong model and never actually checked MRI existence.
    iffid = Mri.query.filter(Mri.fid == fid).first()
    if iffid is None:
        newmri = Mri(fid, patient.id, study_id, check_date, device, modality)
        print('mri更新', newmri)
        db.session.add(newmri)
        db.session.commit()

    # insert the StudyInstanceUID <-> Orthanc-id mapping exactly once
    # (the original added it both inside and after the Mri branch, which
    # could produce a duplicate row when autoflush is disabled)
    ifstudyid = Study_id.query.filter(Study_id.study_id == study_id).first()
    if ifstudyid is None:
        newstudy_id = Study_id(study_id, orthanc_id)
        print('study_id更新', newstudy_id)
        db.session.add(newstudy_id)

    # flag every Mri record of this study as available for viewing
    db.session.query(Mri).filter_by(study_id=study_id).update({'available': True})
    db.session.commit()
              
def unzip_file(zip_src, dst_dir):
    """Extract every member of the zip archive ``zip_src`` into ``dst_dir``.

    Args:
        zip_src: path to the .zip archive.
        dst_dir: destination directory (created as needed by extractall).

    Returns:
        dst_dir, so callers can chain straight into directory traversal.
    """
    # context manager guarantees the archive handle is closed even on error
    # (the original leaked the ZipFile object)
    with zipfile.ZipFile(zip_src, 'r') as fz:
        fz.extractall(dst_dir)
    return dst_dir
    
def UploadFile(path):
    """Upload a single file to Orthanc if it is a DICOM slice of interest.

    Only three series types are uploaded: transversal t2 dark-fluid,
    transversal t1, and ADC maps; navigator ('daohang') and 'PosDisp'
    series are always skipped.

    Args:
        path: filesystem path of the candidate file.

    Returns:
        6000 (sentinel meaning "not a DICOM file") when the file cannot be
        parsed or lacks a SeriesDescription, otherwise None.
    """
    try:
        dcm = pydicom.dcmread(path)          # read_file is a deprecated alias
        series = dcm[0x0008, 0x103E].value   # SeriesDescription tag
    except Exception:
        # unparsable file or missing SeriesDescription: skip it
        print(' not a valid DICOM file, ignoring it')
        return 6000  # 代表不是dicom文件

    # decide whether this series belongs to one of the three wanted types;
    # all branches shared the same daohang/PosDisp exclusions, so they are
    # factored out here
    wanted = (
        ('t2' in series and 'tra' in series and 'dark-fluid' in series)
        or ('t1' in series and 'tra' in series)
        or ('ADC' in series)
    )
    if wanted and 'daohang' not in series and 'PosDisp' not in series:
        with open(path, 'rb') as f:
            UploadBuffer(f.read())
          
def upload_slice():
  """Recompute and store the slice count of every study uploaded in this run.

  Iterates MAPPING_TABLE (Orthanc study id -> StudyInstanceUID), removes
  duplicate series first via delete_series(), then sums the instance count
  of every remaining series and writes the total into the Mri.slice column.
  """
  # walk every study that was uploaded during this run
  for ort_study_id,study_instance_uid in MAPPING_TABLE.items():
    # drop duplicate series of the current study before counting slices
    delete_series(ort_study_id)
    slice_count=0
    patients = find(
          orthanc=orthanc,
          study_filter=lambda s: s.id_ == ort_study_id
      )
    for patient in patients:
      for study in patient.studies:
        if study.id_ == ort_study_id:
          # sum the instance (slice) count over all series of this study
          for series in study.series:
            series_info=series.get_main_information()
            slice_num=len(series_info['Instances'])
            slice_count+=slice_num

    # push the slice total to MySQL
    Mri.query.filter(Mri.study_id == study_instance_uid).update({'slice': slice_count})
    # commit the session
    db.session.commit()    
        
def upload_zip_or_dicom(path):
    """Walk the directory tree under ``path`` and upload everything in it.

    Nested .zip archives are handed back to upload_fun() (mutual
    recursion, since a zip may contain further zips); every other file is
    treated as a candidate DICOM file and passed to UploadFile().
    """
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            full_path = os.path.join(root, filename)
            if os.path.splitext(filename)[1] == ".zip":
                # archive inside the tree: restart from the entry point
                upload_fun(full_path)
            else:
                # plain file (DICOM or junk -- UploadFile sorts it out)
                UploadFile(full_path)
  
def upload_fun(path):
    """Entry point: upload a single DICOM file, a zip archive, or a folder.

    Args:
        path: file or directory path. Zip archives are unpacked into a
              sibling directory of the same base name and their contents
              uploaded; other single files go straight to UploadFile()
              (which skips non-DICOM content); empty folders are reported.
    """
    if os.path.isfile(path):
        extension = os.path.splitext(path)[1]
        if extension == '.zip':
            # BUGFIX: use splitext instead of path.replace(".zip", ""), so a
            # ".zip" occurring elsewhere in the path (e.g. a parent folder
            # named "my.zip.backup") cannot corrupt the destination name
            unzip_path = unzip_file(zip_src=path, dst_dir=os.path.splitext(path)[0])
            # traverse the freshly extracted tree
            upload_zip_or_dicom(unzip_path)
        else:
            # single non-zip file; rar/tar/info files are skipped downstream
            UploadFile(path)
    elif os.path.isdir(path):
        if os.listdir(path):
            upload_zip_or_dicom(path)
        else:
            print("当前文件夹为空！！！")

def _delete_oldest(candidates):
    """Delete the duplicate series with the smallest SeriesTime.

    Args:
        candidates: dict mapping an arbitrary index to a series record with
            keys 'series_id', 'series_name', 'series_time'. Nothing happens
            with fewer than two candidates (no duplicate to remove).
    """
    if len(candidates) > 1:
        # rank the duplicates by acquisition time; ties keep insertion order
        time_by_id = {c['series_id']: c['series_time'] for c in candidates.values()}
        oldest_id = min(time_by_id.keys(), key=lambda sid: time_by_id[sid])
        delete_based_id(oldest_id)


def delete_series(ort_study_id):
    """Remove duplicate t1/t2/ADC series from the given Orthanc study.

    Collects id/name/time for every series of the study; when the study
    holds more than three series (the pipeline expects exactly one t1, one
    t2 and one ADC series), the series are grouped by modality keyword and
    within each group the one with the smallest SeriesTime is deleted.
    """
    patients = find(
        orthanc=orthanc,
        study_filter=lambda s: s.id_ == ort_study_id
    )
    # series_dict: {1: {'series_id':..., 'series_name':..., 'series_time':...}, ...}
    series_dict = {}
    i = 0
    for patient in patients:
        for study in patient.studies:
            if study.id_ == ort_study_id:
                for series in study.series:
                    i += 1
                    series_info = series.get_main_information()
                    series_dict[i] = {
                        'series_id': series_info['ID'],
                        'series_name': series_info['MainDicomTags']['SeriesDescription'],
                        'series_time': series_info['MainDicomTags']['SeriesTime'],
                    }
                break  # at most one study matches this id

    # more than 3 series means at least one modality type is duplicated
    if i > 3:
        flag_t1 = {}
        flag_t2 = {}
        flag_ADC = {}
        # bucket each series by its modality keyword (t2 checked first, as
        # in the original precedence)
        for j in range(1, i + 1):
            series_name = series_dict[j]['series_name']
            if 't2' in series_name:
                flag_t2[j] = series_dict[j]
            elif 't1' in series_name:
                flag_t1[j] = series_dict[j]
            elif "ADC" in series_name:
                flag_ADC[j] = series_dict[j]

        # the three identical min-by-time deletion blocks of the original
        # are factored into one helper
        _delete_oldest(flag_t1)
        _delete_oldest(flag_t2)
        _delete_oldest(flag_ADC)
     
def delete_based_id(ort_ser_id):
    """Delete a series from Orthanc by its Orthanc series identifier.

    Uses the REST API through `requests` instead of shelling out to curl
    via os.system, which removes a shell-injection risk on the id string
    and the hard dependency on a curl binary; auth matches the credentials
    used by the other requests calls in this module.
    """
    requests.delete('http://localhost:8042/series/%s' % ort_ser_id,
                    auth=HTTPBasicAuth('orthanc', 'orthanc'))
              
def upload2orthanc(path):
    """Upload everything under ``path`` to Orthanc, then refresh the
    per-study slice counts in the MySQL database.

    Args:
        path: a DICOM file, a zip archive, or a directory of either.
    """
    # step 1: push files/archives/folders to the Orthanc server
    upload_fun(path)
    # step 2: recompute slice totals for every study uploaded above
    upload_slice()
 
if __name__ == '__main__':
    # NOTE(review): hard-coded sample path for manual testing; consider
    # taking it from sys.argv. upload2orthanc() returns None, so the
    # former `state = ...` binding was meaningless and has been dropped.
    upload2orthanc(path=r"C:\Users\AMYGDALA\Desktop\PN0147686-YUAN_GUO_YING.zip")