import os
import requests
import zipfile
import pydicom
from requests.auth import HTTPBasicAuth
from application import db
from pyorthanc import find, Orthanc
# Orthanc REST client used by upload_slice()/delete_series() (local test creds).
orthanc = Orthanc('http://localhost:8042',
                  username='orthanc', password='orthanc')

# Orthanc study id -> DICOM StudyInstanceUID for every study uploaded in this
# run; filled by UploadBuffer() and consumed by upload_slice().
# (The former module-level `global MAPPING_TABLE` statement was a no-op and
# has been removed.)
MAPPING_TABLE = {}


class Study_id(db.Model):
    """Mapping row between a DICOM StudyInstanceUID and Orthanc's study id."""
    __tablename__ = 'study_id'

    id = db.Column(db.Integer, primary_key=True)
    # DICOM StudyInstanceUID (tag 0020,000d); one row per study.
    study_id = db.Column(db.String(255, 'utf8_general_ci'), unique=True,info='dicom study instance uid')
    # Orthanc's internal identifier for the same study.
    orthanc_id = db.Column(db.String(255, 'utf8_general_ci'), info='orthanc study id')

    def __init__(self,study_id,orthanc_id):
        self.study_id = study_id
        self.orthanc_id = orthanc_id
         
class Patient(db.Model):
    """One examined patient; Mri rows reference this table via patient_id.

    The `info` strings are the original Chinese column descriptions and are
    kept verbatim (they are persisted schema metadata, not comments).
    """
    __tablename__ = 'patient'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255, 'utf8_general_ci'), info='姓名')  # patient name
    sex = db.Column(db.String(4, 'utf8_general_ci'), info='性别')  # sex
    age = db.Column(db.String(255), info='年龄')  # age at examination
    pid = db.Column(db.String(255), index=True, info='患者编号')  # patient number (DICOM PatientID)
    borntime = db.Column(db.DateTime, info='出生日期')  # date of birth
    hospital = db.Column(db.String(255), info='医院名')  # hospital name

    def __init__(self,name,sex,age,pid,hospital,borntime):
        self.sex = sex
        self.pid = pid
        self.name = name
        self.age = age
        self.hospital = hospital
        self.borntime = borntime

class Mri(db.Model): 
    """One MRI examination (study) of a patient, plus AI and pathology results.

    The `info` strings are the original Chinese column descriptions and are
    kept verbatim; English translations appear as trailing comments.
    """
    __tablename__ = 'mri'
 
    id = db.Column(db.Integer, primary_key=True)
    fid = db.Column(db.String(255), unique=True, info='MRI的唯一F号')  # unique F number of the MRI
    patient_id = db.Column(db.ForeignKey('patient.id', ondelete='RESTRICT', onupdate='RESTRICT'), index=True, info='用外键来对应患者的id（患者主键索引）减少数据冗余')  # FK to patient.id to avoid duplicating patient data
    study_id = db.Column(db.String(255), unique=True,info='studyid')  # DICOM StudyInstanceUID
    
    device = db.Column(db.String(255, 'utf8_general_ci'), info='成像设备')  # imaging device / manufacturer
    modality = db.Column(db.String(255, 'utf8_general_ci'), info='模态个数')  # modality
    slice = db.Column(db.Integer, info='切片个数')  # slice count (filled later by upload_slice)
    
    check_date = db.Column(db.DateTime, info='检查日期')  # examination date
    complaints = db.Column(db.Text(collation='utf8_general_ci'), info='主诉')  # chief complaints
    notes = db.Column(db.Text, info='备注')  # remarks
    
    ai_invasion = db.Column(db.String(10, 'utf8_general_ci'), info='ai脑侵袭')  # AI brain-invasion prediction
    ai_level = db.Column(db.String(10, 'utf8_general_ci'), info='ai分级')  # AI grading prediction
    
    operation_date = db.Column(db.DateTime, info='手术日期')  # surgery date
    Lesion_site = db.Column(db.String(255), info='病变部位')  # lesion site
    arachnoid = db.Column(db.String(40, 'utf8_general_ci'), info='蛛网膜界面破坏')  # arachnoid interface damage
    simpson = db.Column(db.String(40, 'utf8_general_ci'), info='辛普森分级')  # Simpson grade
    adhesions = db.Column(db.String(40, 'utf8_general_ci'), info='黏连')  # adhesions
    
    subtype = db.Column(db.String(40, 'utf8_general_ci'), info='病例亚型')  # case subtype
    patholog_invasion = db.Column(db.String(255),info='脑侵袭')  # pathology: brain invasion
    pathology_level = db.Column(db.String(255),info='分级')  # pathology: grade
    pathology_valueadded = db.Column(db.String(255, 'utf8_general_ci'), info='增值指数')  # proliferation index
    
    inv_consistence = db.Column(db.String(4, 'utf8_general_ci'), info='脑侵袭是否一致')  # AI vs pathology invasion agreement
    level_consistence = db.Column(db.String(4, 'utf8_general_ci'), info='分级是否一致')  # AI vs pathology grade agreement

    available = db.Column(db.Boolean, info='是否可用')  # whether this record is usable
    patient = db.relationship('Patient', primaryjoin='Mri.patient_id == Patient.id', backref='mris')

    def __init__(self,fid,patient_id,study_id,check_date,device,modality):
        self.fid = fid
        self.patient_id = patient_id
        self.study_id = study_id
        self.check_date = check_date
        self.device = device
        self.modality = modality
         
def UploadBuffer(dicom):
    """Upload one DICOM instance (raw bytes) to Orthanc and sync its metadata
    into MySQL (Patient / Mri / Study_id tables).

    Returns the instance's DICOM StudyInstanceUID, or None when Orthanc
    rejects the upload.
    """
    url = 'http://localhost:8042'
    username = 'orthanc'
    password = 'orthanc'

    auth = HTTPBasicAuth(username, password)
    r = requests.post('%s/instances' % url, auth=auth, data=dicom)
    try:
        r.raise_for_status()
    except requests.HTTPError:
        # Orthanc refused the instance (was a bare `except:` that also hid
        # unrelated errors); nothing to record.
        return None

    info = r.json()
    # Upload done -- fetch the instance's DICOM tags in short form.
    r2 = requests.get('%s/instances/%s/tags?short' % (url, info['ID']),
                      auth=auth)
    r2.raise_for_status()
    tags = r2.json()
    study_instance_uid = tags['0020,000d']

    # Tags persisted to MySQL (DICOM standard tag names in comments).
    name = tags['0010,0010']        # PatientName
    sex = tags['0010,0040']         # PatientSex
    borndate = tags['0010,0030']    # PatientBirthDate (YYYYMMDD)
    fid = tags['0008,0050']         # AccessionNumber, used as unique F number
    pid = tags['0010,0020']         # PatientID
    study_id = study_instance_uid   # StudyInstanceUID
    check_date = tags['0008,0020']  # StudyDate (YYYYMMDD)
    hospital = tags['0008,0080']    # InstitutionName
    device = tags['0008,0070']      # Manufacturer (stored as "device")
    modality = tags['0008,0060']    # Modality
    # Age at examination = study year - birth year.
    age = int(check_date[:4]) - int(borndate[:4])
    orthanc_id = info['ParentStudy']

    # Remember the Orthanc study id -> StudyInstanceUID mapping for this run;
    # upload_slice() iterates it later.
    MAPPING_TABLE[orthanc_id] = study_id

    # Create the patient on first sight, then reload it to get its primary key.
    patient = Patient.query.filter(Patient.pid == pid).first()
    if patient is None:
        newpatient = Patient(name, sex, age, pid, hospital, borndate)
        db.session.add(newpatient)
        db.session.commit()
        patient = Patient.query.filter(Patient.pid == pid).first()

    # Create the MRI record once per F number.
    # BUG FIX: the duplicate check previously ran `Patient.query` with an
    # `Mri.fid` predicate (an accidental cross-model query); it must query Mri.
    iffid = Mri.query.filter(Mri.fid == fid).first()
    if iffid is None:
        newmri = Mri(fid, patient.id, study_id, check_date, device, modality)
        db.session.add(newmri)
        db.session.commit()

    # Record the StudyInstanceUID <-> Orthanc study id mapping once.
    ifstudyid = Study_id.query.filter(Study_id.study_id == study_id).first()
    if ifstudyid is None:
        newstudy_id = Study_id(study_id, orthanc_id)
        db.session.add(newstudy_id)

    # Mark this study's MRI rows as usable; single commit also flushes the
    # pending Study_id insert above.
    db.session.query(Mri).filter_by(study_id=study_id).update({'available': True})
    db.session.commit()
    return study_instance_uid

def unzip_file(zip_src, dst_dir):
    """Extract every member of the zip archive *zip_src* into *dst_dir*.

    Returns *dst_dir* so callers can chain into a directory walk.
    """
    # Context manager guarantees the archive handle is closed even when
    # extraction raises (the original leaked the open ZipFile).
    with zipfile.ZipFile(zip_src, 'r') as archive:
        archive.extractall(dst_dir)
    return dst_dir
        
def UploadFile(path):
    """Upload a single file to Orthanc if it is a wanted DICOM series.

    Returns:
        6000  -- sentinel: *path* is not a readable DICOM file (skipped);
        None  -- valid DICOM, but its series is not one we keep;
        str   -- StudyInstanceUID returned by UploadBuffer() on success
                 (or None if Orthanc rejected the upload).
    """
    # Skip anything that is not readable DICOM with a SeriesDescription.
    try:
        dcm = pydicom.dcmread(path)  # read_file() is a deprecated alias
        series = dcm[0x0008, 0x103E].value
    except Exception:  # was a bare `except:`
        print(' not a valid DICOM file, ignoring it')
        return 6000  # sentinel: not a DICOM file

    # Keep only the t2 dark-fluid tra, t1 tra and ADC series, always
    # excluding navigator ('daohang') and 'PosDisp' series.
    wanted = (
        ('t2' in series and 'tra_dark-fluid' in series)
        or ('t1' in series and 'tra' in series)
        or ('ADC' in series)
    ) and 'daohang' not in series and 'PosDisp' not in series
    if not wanted:
        return None

    with open(path, 'rb') as f:
        dicom_bytes = f.read()
    # Renamed from `Study_id`, which shadowed the model class of that name.
    return UploadBuffer(dicom_bytes)
      
def upload_slice():
    """For every study uploaded in this run, count its slices in Orthanc and
    persist the count into the matching Mri row."""
    for ort_study_id, study_instance_uid in MAPPING_TABLE.items():
        # Prune duplicated series first so the count reflects kept data only.
        delete_series(ort_study_id)
        matched_patients = find(
            orthanc=orthanc,
            study_filter=lambda s: s.id_ == ort_study_id,
        )

        # Sum the instance counts over every series belonging to this study.
        total_slices = 0
        for pat in matched_patients:
            for st in pat.studies:
                if st.id_ != ort_study_id:
                    continue
                for ser in st.series:
                    ser_info = ser.get_main_information()
                    total_slices += len(ser_info['Instances'])

        # Write the slice total back to MySQL and commit per study.
        Mri.query.filter(Mri.study_id == study_instance_uid).update({'slice': total_slices})
        db.session.commit()

def upload_zip_or_dicom(path):
    """Walk the directory *path* and upload every file, recursing into zips.

    Returns the result of the last successful upload (a StudyInstanceUID, or
    the 6000 non-DICOM sentinel propagated from UploadFile, matching the
    original != None check), or None when nothing under *path* produced a
    result.  BUG FIX: the original raised UnboundLocalError in that case
    because its result variable was never initialised; it also shadowed the
    Study_id model class.
    """
    last_result = None
    for root, dirs, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            if os.path.splitext(name)[1] == ".zip":
                # A zip may contain further zips, so recurse via upload_fun.
                result = upload_fun(full)
            else:
                # Single file; UploadFile skips non-DICOM content itself.
                result = UploadFile(full)
            if result is not None:
                last_result = result

    return last_result
  
def upload_fun(path): 
    """Upload *path* to Orthanc: a single DICOM file, a .zip archive, or a
    directory tree of either.

    Returns the last upload result (see upload_zip_or_dicom), or None when
    nothing was uploaded.  BUG FIX: the original left its result variable
    unbound -- and raised UnboundLocalError -- when the folder was empty or
    the single file was skipped.
    """
    result = None
    # Single file: either a zip archive or one DICOM file.  rar/tar and other
    # non-DICOM files are skipped by UploadFile.
    if os.path.isfile(path):
        extension = os.path.splitext(path)[1]
        if extension == '.zip':
            # Extract into a same-named folder, then walk the extracted tree.
            unzip_path = unzip_file(zip_src=path, dst_dir=path.replace(".zip", ""))
            result = upload_zip_or_dicom(unzip_path)
        else:
            uid = UploadFile(path)
            if uid is not None:
                result = uid
    # Directory: walk it (empty folders are reported and skipped).
    elif os.path.isdir(path):
        if os.listdir(path):
            result = upload_zip_or_dicom(path)
        else:
            print("当前文件夹为空！！！")
    return result
  
# For each StudyInstanceUID, obtain the corresponding Orthanc study id (ort_study_id).
# From ort_study_id, collect the ids of all series belonging to that study.
# Record every series name together with the SeriesTime read from one of its
# slices, forming a dict {series_name: series_time}.
# Walk all series; when duplicated/similar names appear, compare their
# SeriesTime values and delete the series with the smaller (earlier) time.

def delete_series(ort_study_id):
    """Delete duplicated series of one Orthanc study.

    Collects every series of the study (id, SeriesDescription, SeriesTime);
    when more than three series exist, at least one of the t1 / t2 / ADC
    groups is duplicated, and within each duplicated group the series with
    the earliest SeriesTime is deleted from Orthanc.
    """
    patients = find(
          orthanc=orthanc,
          study_filter=lambda s: s.id_ == ort_study_id
      )
    # index -> {'series_id', 'series_name', 'series_time'} for every series.
    series_dict = {}
    i = 0
    for patient in patients:
      for study in patient.studies:
        if study.id_ == ort_study_id:
          for series in study.series:
              i += 1
              series_info = series.get_main_information()
              series_dict[i] = {
                  'series_id': series_info['ID'],
                  'series_name': series_info['MainDicomTags']['SeriesDescription'],
                  'series_time': series_info['MainDicomTags']['SeriesTime'],
              }
          # Only one study can match this id; stop scanning further studies.
          break

    # With <= 3 series (one per modality) there is nothing to deduplicate.
    if i > 3:
      flag_t1 = {}
      flag_t2 = {}
      flag_ADC = {}
      # Bucket series by modality keyword (same precedence as before:
      # 't2' wins over 't1', which wins over 'ADC').
      for j in range(1, i + 1):
        name = series_dict[j]['series_name']
        if 't2' in name:
            flag_t2[j] = series_dict[j]
        elif 't1' in name:
            flag_t1[j] = series_dict[j]
        elif "ADC" in name:
            flag_ADC[j] = series_dict[j]

      # The pruning rule was triplicated inline for t1/t2/ADC; it is now a
      # single helper applied to each group.
      _delete_oldest_duplicate(flag_t1)
      _delete_oldest_duplicate(flag_t2)
      _delete_oldest_duplicate(flag_ADC)


def _delete_oldest_duplicate(group):
    """If *group* (index -> series record) holds more than one series, delete
    the one with the smallest SeriesTime from Orthanc."""
    if len(group) <= 1:
        return
    # Re-key by series id so we can pick the minimum by time.
    time_by_id = {item['series_id']: item['series_time'] for item in group.values()}
    min_id = min(time_by_id.keys(), key=(lambda x: time_by_id[x]))
    delete_based_id(min_id)
     
def delete_based_id(ort_ser_id):
    """Delete the series with Orthanc id *ort_ser_id* from the local server.

    Uses the authenticated requests API -- consistent with UploadBuffer --
    instead of the original `os.system("curl -X DELETE ...")`, which both
    skipped HTTP authentication and interpolated the id into a shell command.
    """
    requests.delete(
        'http://localhost:8042/series/%s' % ort_ser_id,
        auth=HTTPBasicAuth('orthanc', 'orthanc'),
    )
      
def upload2orthanc(path): 
    """Upload everything under *path* to Orthanc, then refresh slice counts.

    Returns whatever upload_fun() produced (the last StudyInstanceUID seen).
    """
    uploaded_uid = upload_fun(path)
    # Slice totals can only be computed once every instance is in Orthanc.
    upload_slice()
    return uploaded_uid
 
if __name__ == '__main__':   
    # Manual smoke test: upload one patient's zipped study from a local path.
    state=upload2orthanc(path=r"C:\Users\AMYGDALA\Desktop\PN0147686-YUAN_GUO_YING.zip")
    # state=upload2orthanc(path=r"C:\Users\AMYGDALA\Desktop\data\Grade_2_invasion\Grade_2_invasion")
   
      # Study_id=upload2orthanc(path=r"C:\Users\AMYGDALA\Desktop\data\Grade_2_noninvasion\Grade_2_noninvasion")
      # state=upload2orthanc(path=r"C:\Users\AMYGDALA\Desktop\data\Grade_2_invasion\Grade_2_invasion")