#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import numpy as np
import random
#import tracing
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import cv2
from PIL import Image
from demo_interface import Ui_MainWindow 
from about import Ui_Form  
from settings import Ui_Settings
from Monitor import Ui_Monitor
from newsb import Pedestrian
from subtraction import Waitforsbbzx
"../FFOutput/04.avi"
"../FFOutput/05.avi"
"../bike.avi"
"../fucing.avi"      
"../FFOutput/02.avi" "disturbing"
"../FFOutput/07.avi" "quick movement"
"../FFOutput/11.avi" 
"../FFOutput/12.avi" "people too much"
"../FFOutput/13.avi" "no symbolic object"
"../FFOutput/14.avi" "no symbolic object"
"../FFOutput/18.avi"
camera=cv2.VideoCapture("../FFOutput/05.avi")
framenum=camera.get(7)
pause=False

def center(points):  # centroid of a given box
    """Return the centroid of a polygon given as [x, y] points.

    Uses true floating-point arithmetic so that integer coordinates are
    not truncated under Python 2 (the original ``/ 4`` was floor division
    there), and works for any number of points, not just four.

    Args:
        points: iterable of [x, y] pairs (at least one point).

    Returns:
        np.ndarray of dtype float32 and shape (2,): [cx, cy].
    """
    pts = np.asarray(points, dtype=np.float32)
    # Mean over the point axis is the centroid, computed in float32.
    return pts.mean(axis=0)

class MyApp(QtWidgets.QMainWindow):
    """Main window of the video-synopsis demo.

    Connects the generated ``Ui_MainWindow`` widgets to playback control,
    synopsis generation, and the auxiliary About / Settings / Monitor
    windows.  Frame processing runs in a ``Waitforsbhsj`` worker thread.
    """

    # Path of the currently selected input video (overwritten by openavifile).
    fileName1 = "../fucing.avi"
    # Worker thread instance; None until the user presses Play.
    hsj = None

    def __init__(self):
        super(MyApp, self).__init__()
        self.new = Ui_MainWindow()
        self.new.setupUi(self)
        # Wire buttons and menu actions to their handlers.
        self.new.pushButton.clicked.connect(self.startlogin)
        self.new.pushButton_2.clicked.connect(self.flippause)
        self.new.pushButton_3.clicked.connect(self.generating)
        self.new.actionOpen.triggered.connect(self.openavifile)
        self.new.actionVersion.triggered.connect(self.showversion)
        self.new.actionSettings.triggered.connect(self.showsettings)
        self.new.actionMonitor.triggered.connect(self.showmonitor)
        self.new.comboBox.currentIndexChanged.connect(self.changeshowevent)
        self.new.horizontalSlider.setMaximum(framenum)
        self.new.horizontalSlider.setMinimum(0)
        self.new.label.setPalette(QPalette(Qt.white))
        self.new.label.setAutoFillBackground(True)
        # About window.
        self.about = Ui_Form()
        self.aboutform = QtWidgets.QMainWindow()
        self.about.setupUi(self.aboutform)
        # Settings window.  The original built this twice, leaking the
        # first Ui_Settings/QMainWindow pair; building it once is enough.
        self.setting = Ui_Settings()
        self.settingform = QtWidgets.QMainWindow()
        self.setting.setupUi(self.settingform)
        self.setting.lineEdit.editingFinished.connect(self.setfiltingarea)
        self.setting.lineEdit_2.editingFinished.connect(self.setfiltingdistance)
        self.setting.comboBox.currentIndexChanged.connect(self.setplayingspeed)

        self.monitor = Monitor()

    def __del__(self):
        # Best-effort cleanup of owned helpers (kept from the original).
        if self.hsj:
            del self.hsj
        if self.about:
            del self.about
        if self.setting:
            del self.setting
        if self.monitor:
            del self.monitor

    def setplayingspeed(self, playingspeed):
        """Map the speed combo index (0-3) onto the worker's busy-wait count."""
        if self.hsj:
            self.hsj.delaying = (4 - int(playingspeed)) * 500000

    def setfiltingarea(self):
        """Push the minimum contour area from the settings line edit."""
        if self.hsj:
            self.hsj.filtingarea = int(self.setting.lineEdit.text())

    def setfiltingdistance(self, distance=None):
        """Push the track-matching distance from the settings line edit.

        ``distance`` is unused and now defaults to None: the connected
        ``editingFinished`` signal emits no arguments, so the original
        required-parameter signature raised TypeError when it fired.
        """
        if self.hsj:
            self.hsj.filtingdistance = int(self.setting.lineEdit_2.text())

    def update(self):
        """Reset UI and tracking state for a fresh run.

        NOTE(review): shadows QWidget.update(); kept under this name for
        backward compatibility with existing callers.
        """
        self.new.horizontalSlider.setMaximum(framenum)
        self.new.horizontalSlider.setMinimum(0)
        self.new.label.setPalette(QPalette(Qt.white))
        self.new.label.setAutoFillBackground(True)
        self.new.textBrowser.clear()
        Pedestrian.count = 0
        camera.set(1, 0)  # rewind: property id 1 == CAP_PROP_POS_FRAMES
        if self.hsj:
            self.hsj.stop()

    def endgenerating(self):
        """Slot: re-enable the Generate button when the synopsis thread ends."""
        self.new.pushButton_3.setText("Generate")
        self.new.pushButton_3.setEnabled(True)
        QMessageBox.information(self, "Generate",
                                "Generating synopsis completed!",
                                QMessageBox.Ok)

    def showsettings(self):
        self.settingform.show()

    def showmonitor(self):
        """Open the monitor window and feed it sub-frames if a run is active."""
        self.monitor.running = True
        self.monitor.show()
        if self.hsj:
            self.hsj._subsignal.connect(self.monitor.showsubframe)

    def showversion(self):
        self.aboutform.show()

    def generating(self):
        """Start the background synopsis-generation thread."""
        self.bzx = Waitforsbbzx()
        self.bzx.cap = cv2.VideoCapture(self.fileName1)
        print(self.fileName1)
        self.bzx._endgenerating.connect(self.endgenerating)
        self.bzx.start()
        self.new.pushButton_3.setText("Generating...")
        self.new.pushButton_3.setEnabled(False)

    def openavifile(self):
        """Let the user pick a video file, then reset playback state."""
        global camera
        global framenum
        self.fileName1, filetype = QFileDialog.getOpenFileName(
            self,
            "选取文件",
            "C:/",
            "All Files (*);; AVI video (*.avi)")  # ';;' separates filters
        print(self.fileName1, filetype)
        camera = cv2.VideoCapture(self.fileName1)
        framenum = camera.get(7)  # CAP_PROP_FRAME_COUNT
        self.update()
        self.new.pushButton.setText("Play")

    def flippause(self):
        """Toggle the global pause flag and update the button caption."""
        global pause
        # Was ``pause = ~pause``: bitwise NOT on a bool yields -1/0,
        # which only toggles truthiness by accident; use a real boolean.
        pause = not pause
        if pause:
            self.new.pushButton_2.setText("continue")
        else:
            self.new.pushButton_2.setText("pause")

    def startlogin(self):
        """(Re)start the frame-processing worker thread."""
        if self.hsj:
            self.update()  # stop the previous run and reset the UI
        self.hsj = Waitforsbhsj()
        # Connect worker signals to the UI.
        self.hsj.showevent = self.new.comboBox.currentText()
        self.hsj._signal.connect(self.loadframe)
        self.hsj._informationsignal.connect(self.new.textBrowser.setText)
        self.hsj._framenumsignal.connect(self.new.horizontalSlider.setValue)
        if self.monitor.running:
            self.hsj._subsignal.connect(self.monitor.showsubframe)
        self.hsj.start()
        self.new.pushButton.setText("Replay")

    def viewvideo(self):
        """Grab one frame from the global camera and display it."""
        (grabbed, frame) = camera.read()
        if not grabbed:
            (grabbed, frame) = camera.read()
            print('not grabbed')
            return
        height, width, bytesPerComponent = frame.shape
        bytesPerLine = bytesPerComponent * width
        # Convert BGR -> RGB in place for QImage.
        cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)
        image = QImage(frame.data, width, height, bytesPerLine,
                       QImage.Format_RGB888)
        pix = QPixmap.fromImage(image)
        self.new.label.setPixmap(pix)

    def loadframe(self, frame):
        """Slot: render a processed BGR frame into the main label."""
        height, width, bytesPerComponent = frame.shape
        bytesPerLine = bytesPerComponent * width
        # Convert BGR -> RGB in place for QImage.
        cv2.cvtColor(frame, cv2.COLOR_BGR2RGB, frame)
        image = QImage(frame.data, width, height, bytesPerLine,
                       QImage.Format_RGB888)
        pix = QPixmap.fromImage(image)
        self.new.label.setPixmap(pix)
        self.new.label.update()

    def changeshowevent(self, number):
        """Slot: propagate the display-mode combo selection to the worker."""
        if self.hsj:
            self.hsj.showevent = self.new.comboBox.currentText()
        
class Waitforsbhsj(QThread):
    """Worker thread: reads frames from the global ``camera``, runs KNN
    background subtraction plus contour-based tracking (``Pedestrian``
    objects), and emits processed frames / status text to the GUI.
    """

    # Loop control flag; cleared by stop().
    running = True
    # Display mode from the GUI combo box
    # ("Foreground" / "Processing" / "Original Video").
    showevent = None
    # Index of the frame most recently read.
    framenum = 0
    # Busy-wait iteration count between frames (crude playback speed).
    delaying = 2000000

    _signal = pyqtSignal(np.ndarray)          # processed / original frame
    _informationsignal = pyqtSignal(str)      # per-frame status text
    _framenumsignal = pyqtSignal(int)         # slider position
    _subsignal = pyqtSignal(np.ndarray, int)  # per-pedestrian sub-frame

    # Minimum contour area considered a pedestrian.
    filtingarea = 500
    # Maximum matching distance for claiming an existing track.
    filtingdistance = 100

    def __init__(self):
        # The original declared ``_init_`` (single underscores), so the
        # constructor was never invoked; fixed to a real __init__.
        super(Waitforsbhsj, self).__init__()

    def __del__(self):
        self.stop()

    def stop(self):
        """Ask the run() loop to exit at its next iteration."""
        self.running = False

    def run(self):
        """Main processing loop.

        Per frame: apply KNN background subtraction, threshold and clean
        the mask, find contours, match them against existing Pedestrian
        tracks (creating new tracks for unmatched blobs), drop tracks that
        matched nothing, then emit whichever frame ``showevent`` selects
        plus status text.  Predicted/measured/corrected positions are
        appended to test.txt and every processed frame goes to output.avi.
        """
        global camera
        self.running = True
        unmatched = 1  # 1 -> current contour needs a brand-new track
        f = open("test.txt", 'w')
        history = 20
        # KNN background subtractor.
        bs = cv2.createBackgroundSubtractorKNN()
        bs.setHistory(history)
        pedestrians = []
        firstFrame = True
        frames = 1
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
        grabbed, frame = camera.read()
        self.framenum += 1
        while self.running:
            if pause:
                # NOTE(review): busy-spins while paused; kept as-is.
                continue
            grabbed, frame = camera.read()
            self.framenum += 1
            if grabbed is False:
                print("failed to grab frame.")
                break
            originframe = frame.copy()

            fgmask = bs.apply(frame)
            # Let the subtractor accumulate some history before tracking.
            if frames < history:
                frames += 1
                continue

            for i in range(0, len(pedestrians)):
                pedestrians[i].flag = 0
            th = cv2.threshold(fgmask.copy(), 127, 255, cv2.THRESH_BINARY)[1]
            th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
            dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)), iterations=2)
            if self.showevent == "Foreground":
                self._signal.emit(cv2.cvtColor(dilated, cv2.COLOR_GRAY2BGR))
            # OpenCV 3 signature: returns (image, contours, hierarchy).
            image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            people_count = 0
            for i in range(0, len(pedestrians)):
                pedestrians[i].flag = 0
            for c in contours:
                if cv2.contourArea(c) > self.filtingarea:
                    (x, y, w, h) = cv2.boundingRect(c)
                    rcenter = center([[x, y], [x + w, y], [x, y + h], [x + w, y + h]])
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
                    # Only create tracks freely in the first processed frame;
                    # afterwards try to match an existing track first.
                    if firstFrame is True:
                        unmatched = 0
                        Pedestrian.total += 1
                        pedestrians.append(Pedestrian(Pedestrian.total, frame, (x, y, w, h), random.randint(0, 7)))
                        cv2.imwrite("1.jpg", frame)
                    else:
                        if len(pedestrians) == 0:
                            Pedestrian.total += 1
                            pedestrians.append(Pedestrian(Pedestrian.total, frame, (x, y, w, h), random.randint(0, 7)))
                        for i in range(0, len(pedestrians)):
                            unmatched = 1
                            if pedestrians[i].flag == 0:
                                if pedestrians[i].judge(rcenter, frame, (x, y, w, h), self.filtingdistance) is True:
                                    # Within matching distance: track claimed.
                                    unmatched = 0
                                    break
                    if unmatched == 1:
                        Pedestrian.total += 1
                        pedestrians.append(Pedestrian(Pedestrian.total, frame, (x, y, w, h), random.randint(0, 7)))
                    people_count += 1

            # Collect tracks that matched no contour this frame.
            noflagpoint = []
            for i in range(0, len(pedestrians)):
                if pedestrians[i].flag == 0:
                    noflagpoint.append(i)
                else:
                    f.write("pre:%d %d  mes:%d %d correct_pos:%d %d\n" % (pedestrians[i].lastpre[0], pedestrians[i].lastpre[1], pedestrians[i].center[0], pedestrians[i].center[1], pedestrians[i].correct_pos[0], pedestrians[i].correct_pos[1]))
            l = len(noflagpoint)
            if len(noflagpoint) != 0:
                if l == 1:
                    del pedestrians[noflagpoint[0]]
                else:
                    # Delete stale tracks; after each removal, shift the
                    # still-pending recorded indices one slot left.
                    for i in range(0, len(noflagpoint)):
                        del pedestrians[noflagpoint[i]]
                        for j in range(i, l):
                            if j + 1 < l:
                                noflagpoint[j + 1] -= 1
            firstFrame = False
            Pedestrian.string = "第%d帧\n\n" % frames
            for i in range(0, len(pedestrians)):
                Pedestrian.string += "第%d号:%d,%d\n\n" % (pedestrians[i].id, pedestrians[i].lastpre[0], pedestrians[i].lastpre[1])
            frames += 1
            # Forward active per-pedestrian sub-frames to the monitor slots.
            for i in range(0, 6):
                if Pedestrian.nwjudge[i] == 0:
                    continue
                else:
                    self._subsignal.emit(Pedestrian.subframes[i], i)

            if self.showevent == "Processing":
                self._signal.emit(frame)
                self._informationsignal.emit(Pedestrian.string)
            if self.showevent == "Original Video":
                self._signal.emit(originframe)
            self._framenumsignal.emit(self.framenum)
            out.write(frame)
            if self.delaying:
                # Crude busy-wait to slow playback; see setplayingspeed().
                for i in range(0, self.delaying):
                    pass
        out.release()  # was never released; flushes output.avi properly
        f.close()

class Monitor(QtWidgets.QMainWindow):
    """Secondary window showing per-pedestrian sub-frames in six labels."""

    # True while the monitor window is open (set by MyApp.showmonitor).
    running = False

    def __init__(self):
        super(Monitor, self).__init__()
        self.new = Ui_Monitor()
        self.new.setupUi(self)

    def showsubframe(self, subframe, index):
        """Slot: render a BGR sub-frame into the label selected by index.

        Args:
            subframe: BGR image as an ndarray (height, width, channels).
            index: target slot, 0-5; out-of-range indices are ignored,
                matching the original if-chain behavior.
        """
        height, width, bytesPerComponent = subframe.shape
        bytesPerLine = bytesPerComponent * width
        # Convert BGR -> RGB in place for QImage.
        cv2.cvtColor(subframe, cv2.COLOR_BGR2RGB, subframe)
        image = QImage(subframe.data, width, height, bytesPerLine,
                      QImage.Format_RGB888)
        pix = QPixmap.fromImage(image)
        # Labels are named label, label_2, ..., label_6 in the generated UI;
        # a lookup table replaces the original six duplicated branches.
        labels = (self.new.label, self.new.label_2, self.new.label_3,
                  self.new.label_4, self.new.label_5, self.new.label_6)
        if 0 <= index < len(labels):
            labels[index].setPixmap(pix)
            labels[index].update()

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = MyApp()
    window.show()
    sys.exit(app.exec_())


