# -*- coding: utf-8 -*-
"""
Pedestrian tracking: KNN background subtraction plus a per-pedestrian
Kalman filter.

Created on Sat Oct 21 21:43:42 2017

@author: huang
"""
import cv2
import numpy as np
import math
#import bigsb

def center(points):
    """Return the centroid of the first four [x, y] points as a float32 array."""
    corners = points[:4]
    cx = sum(p[0] for p in corners) / 4
    cy = sum(p[1] for p in corners) / 4
    return np.array([np.float32(cx), np.float32(cy)], np.float32)



class Pedestrian():
    """One tracked pedestrian: an id, a Kalman tracker and a bounding box.

    Class-level attributes are shared by all instances: a small pool of six
    display "slots" (``nwjudge``/``subframes``) plus drawing resources.
    """
    string = ""
    total = 0   # total number of pedestrians that have passed through
    count = 0   # running creation counter, incremented by callers
    # BGR colors used to distinguish tracked pedestrians on screen.
    color = [(60, 20, 220), (147, 20, 255), (204, 50, 153), (255, 0, 0),
             (127, 255, 0), (0, 255, 0), (0, 255, 255), (0, 165, 255)]
    font = cv2.FONT_HERSHEY_SIMPLEX
    nwjudge = [0, 0, 0, 0, 0, 0]                      # slot occupancy flags
    subframes = [None, None, None, None, None, None]  # last crop per slot

    def __init__(self, id, frame, mes_window, ncolor=0):
        """Create a tracker.

        id: numeric pedestrian id.
        frame: image the overlays are drawn onto.
        mes_window: measured (x, y, w, h) bounding box.
        ncolor: index into Pedestrian.color; defaults to 0 so callers that
            omit it (as mainprocess does) still work.
        """
        self.id = int(id)
        x, y, w, h = mes_window
        self.mes_window = mes_window
        self.ncolor = ncolor
        self.nw = 100  # display-slot number; 100 means "no slot assigned yet"
        # Kalman filter: 4 state dims (x, y, vx, vy), 2 measured dims (x, y),
        # control vector dimension defaults to 0.
        self.kalman = cv2.KalmanFilter(4, 2)
        self.kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                                  [0, 1, 0, 0]], np.float32)
        self.kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                                 [0, 1, 0, 1],
                                                 [0, 0, 1, 0],
                                                 [0, 0, 0, 1]], np.float32)
        self.kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03
        self.kalman.measurementNoiseCov = np.eye(2, dtype=np.float32)
        cx = mes_window[0] + mes_window[2] / 2
        cy = mes_window[1] + mes_window[3] / 2
        self.kalman.statePre = np.array([[cx], [cy], [0], [0]], np.float32)
        # BUGFIX: statePost must be 4x1 like statePre; the original only set
        # the two position components, which breaks correct().
        self.kalman.statePost = np.array([[cx], [cy], [0], [0]], np.float32)
        # BUGFIX: np.array((2,1), np.float32) built the literal vector [2, 1];
        # a zeroed 2x1 measurement buffer was intended.
        self.measurement = np.zeros((2, 1), np.float32)
        self.prediction = np.array((cx, cy), np.float32)
        self.lastpre = self.prediction
        self.correct_pos = self.prediction
        self.center = None
        self.flag = 1  # 1 = matched this frame, 0 = candidate for removal
        self.update(frame, mes_window)

    def __del__(self):
        # Free this pedestrian's display slot. Guarded because nw == 100
        # means no slot was ever assigned and indexing with it would raise
        # (exceptions inside __del__ are only printed, never propagated).
        if 0 <= self.nw < len(Pedestrian.nwjudge):
            Pedestrian.nwjudge[self.nw] = 0

    def update(self, frame, mes_window):
        """Feed a measured box into the Kalman filter and draw the overlays.

        frame: image drawn onto (mutated in place).
        mes_window: measured (x, y, w, h) bounding box.
        """
        x, y, w, h = mes_window
        self.center = center([[x, y], [x + w, y], [x, y + h], [x + w, y + h]])
        self.lastpre = self.prediction
        self.correct_pos = self.kalman.correct(self.center)
        self.prediction = self.kalman.predict()
        cv2.circle(frame,
                   (int(self.correct_pos[0]), int(self.correct_pos[1])),
                   4, Pedestrian.color[self.ncolor], -1)
        cv2.putText(frame, "ID:%d" % self.id, (int(x + w / 3), int(y - 10)),
                    self.font, 0.4,
                    Pedestrian.color[self.ncolor],
                    1,
                    cv2.LINE_AA)
        # Decide which display slot this crop goes into: reuse our own slot
        # if we already hold one, otherwise claim the first free one (if any).
        # (The original had two duplicated branches doing exactly this.)
        if self.nw != 100:
            slot = self.nw
        else:
            slot = next((i for i, used in enumerate(Pedestrian.nwjudge)
                         if used == 0), None)
        if slot is not None:
            Pedestrian.subframes[slot] = frame[y:y + h, x:x + w].copy()
            self.nw = slot
            Pedestrian.nwjudge[slot] = 1

    def judge(self, rcenter, frame, mes_window, filtingdistance=50):
        """Return True and absorb the measurement if rcenter is close enough.

        rcenter: measured centroid (x, y).
        filtingdistance: max pixel distance between the Kalman prediction and
            the measurement for a match; defaults to 50 so the existing
            3-argument call site keeps working. TODO confirm threshold value.
        """
        dx = float(self.prediction[0]) - float(rcenter[0])
        dy = float(self.prediction[1]) - float(rcenter[1])
        dis = math.hypot(dx, dy)
        if dis < filtingdistance:
            self.flag = 1
            self.mes_window = mes_window
            self.update(frame, mes_window)
            return True
        return False
    
    
 ###########################################################################
# Candidate input videos, with notes on each clip's difficulty. These were
# bare string-literal expression statements (no-ops at runtime); kept here
# as comments so the notes survive without dead code.
#   "../FFOutput/04.avi"
#   "../FFOutput/05.avi"
#   "../bike.avi"
#   "../fucing.avi"
#   "../FFOutput/02.avi"  - disturbing
#   "../FFOutput/07.avi"  - quick movement
#   "../FFOutput/11.avi"
#   "../FFOutput/12.avi"  - people too much
#   "../FFOutput/13.avi"  - no symbolic object
#   "../FFOutput/14.avi"  - no symbolic object
#   "../FFOutput/18.avi"

def mainprocess():
    """Track pedestrians in a video file and write the annotated output.

    Reads ../fucing.avi, builds a KNN background-subtraction model over the
    first `history` frames, then detects moving blobs, matches each blob to
    an existing Pedestrian via its Kalman prediction (or creates a new one),
    and writes annotated frames to output.avi.
    """
    # Max px between a pedestrian's prediction and a blob centroid to count
    # as the same person. NOTE(review): the original call site passed no
    # threshold at all (a TypeError); 50 is a guess - confirm.
    filting_distance = 50
    log_file = open("test.txt", 'w')  # kept for the (commented) debug dump
    camera = cv2.VideoCapture("../fucing.avi")
    history = 20
    # KNN background subtractor
    bs = cv2.createBackgroundSubtractorKNN()
    bs.setHistory(history)
    cv2.namedWindow("surveillance")
    pedestrians = {}
    firstFrame = True
    frames = 1
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
    grabbed, frame = camera.read()
    while True:
        grabbed, frame = camera.read()
        if not grabbed:
            print("failed to grab frame.")
            break
        print(" -------------------- FRAME %d --------------------" % frames)
        fgmask = bs.apply(frame)
        # Let the background subtractor build a bit of history first.
        if frames < history:
            frames += 1
            continue

        th = cv2.threshold(fgmask.copy(), 127, 255, cv2.THRESH_BINARY)[1]
        th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                       iterations=1)
        dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)),
                             iterations=2)
        # findContours returns (image, contours, hier) on OpenCV 3 but
        # (contours, hier) on OpenCV 2/4; [-2:] works on all of them.
        contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)[-2:]

        # Reset match flags once per frame; judge() sets flag back to 1 on a
        # match. (The original reset flags inside the per-contour loop, so a
        # pedestrian that matched an earlier contour could be re-flagged 0
        # while being tested against a later one, then wrongly deleted.)
        for p in pedestrians.values():
            p.flag = 0

        people_count = 0
        for c in contours:
            if cv2.contourArea(c) <= 500:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            rcenter = center([[x, y], [x + w, y], [x, y + h], [x + w, y + h]])
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
            matched = False
            if not firstFrame:
                for p in pedestrians.values():
                    if p.judge(rcenter, frame, (x, y, w, h), filting_distance):
                        matched = True
                        break
            if not matched:
                # New pedestrian. The original created it twice on the first
                # frame (once in the firstFrame branch, again because the
                # flag variable was never cleared) and read the nonexistent
                # Pedestrian.count; use the existing total counter instead.
                Pedestrian.total += 1
                ped = Pedestrian(people_count, frame, (x, y, w, h),
                                 people_count % len(Pedestrian.color))
                ped.number = Pedestrian.total
                pedestrians[people_count] = ped
            people_count += 1

        # Drop pedestrians that matched nothing this frame. The original did
        # `del p`, which only unbinds the loop variable and never removes
        # anything from the dict.
        for key in [k for k, p in pedestrians.items() if p.flag == 0]:
            del pedestrians[key]
            # log_file.write(...) debug dump was here, still disabled

        firstFrame = False
        frames += 1
        out.write(frame)
        if cv2.waitKey(50) & 0xff == 27:
            break
    out.release()
    camera.release()
    log_file.close()
    cv2.destroyAllWindows()
# Entry point. The original guard was commented out and called a
# nonexistent main(); the actual entry function is mainprocess().
if __name__ == "__main__":
    mainprocess()

