/***************************************************************************
 创建者: 华磊
 开始时间: 2019.4.24
 copyright: (C) 华友高科
 修改说明: (每次有修改就添加一条，带有 修改人，修改时间，修改描述)
 example (1) hualei 2020.5.8 类的具体实现代码编写

 ***************************************************************************
 *  @file x.h
 *  功能:视觉跟踪
 *　1)相机内参需要标定,使用方格标定图形，补偿相机和视场的平行度和镜头畸变。卡洛普视觉手册Ｐ20
 * 2)相机外参手眼标定，使用９点图形。用户坐标系和相机坐标系平行（最好原点重合），输入像素比。卡洛普视觉手册Ｐ21
 * 3)物体模板识别，识别的中心点一般不可设置，机械手抓取时，该中心点需要和工具坐标原点重合。（有的情况下这个工具坐标不好示教，最好模板中心点可以设置）
 * 4)非线性补偿。可能机械手原点误差和几何参数误差导致机械手的绝对精度差。通过９点去补偿这个偏差。卡洛普视觉手册Ｐ７
 * 视觉和跟踪分为２个类，定点视觉只用本类，跟踪视觉需要用本类和皮带类。工艺号也是分开的。
 *                                                                         *
 ***************************************************************************/

#ifndef VISIONTRACK_H
#define VISIONTRACK_H

#include <QObject>
#include "globalDefine.h"
#include <QVector>
#include <QMap>
#include <QQueue>
#include "GeneralDefine.h"
#include <QTcpSocket>
#include <QMutex>
#include "MacroDefine/GeneralDefine.h"

//class CoordinateManager;
class RegData;
class MotionMessage;
//class QTcpSocket;
namespace KDL{
class Frame;
}
//static 关键词变量，需要注意重入．实际上是一个全局变量．

class BeltTrack;
/// Vision tracking: talks to a camera over TCP, converts received camera-frame
/// points into robot/user-frame points and stores them into registers (see
/// getVisionData). Fixed-point vision uses this class alone; belt (conveyor)
/// tracking uses this class together with BeltTrack — see the file header for
/// the calibration prerequisites (intrinsics, hand-eye 9-point, nonlinearity).
class VisionTrack : public QObject
{
    Q_OBJECT
public:
    /// @param visionIdIn      id of this vision instance
    /// @param regDataIn       register storage the vision points are written into
    ///                        (non-owning — presumably the caller keeps ownership; verify)
    /// @param configIn        vision/tracking configuration (taken by value, stored in visionConfig)
    /// @param motionMessageIn message/alarm sink, presumably used by addMsg() — verify in the .cpp
    /// @param deltaTimeIn     cycle period of loopRealtimeOnce() (units not visible here — TODO confirm)
    /// @param trackIn         belt-tracking helper; NULL for fixed-point (non-tracking) vision
    explicit VisionTrack(int visionIdIn,RegData *regDataIn, VisionTrackConfig configIn, MotionMessage *motionMessageIn,double deltaTimeIn,
                         BeltTrack* trackIn=NULL,QObject *parent = nullptr);

public:
    int forceConnectCamera();                 // force a (re)connection attempt to the camera
    int setWork(bool isOnIn);                 // enable/disable working state (see isInWork)
    int setDebugFlag(int flagIn);
    int forceTrigCamera(bool isHighIn);       // manually drive the camera trigger output level
    int getDoType(int &doDeviceTypeOut,int &doIndexOut);
    int setVisionTrigObjectLength(double lengthIn);
    int reset();
    int getVisionTrackDebugInfo(VisionTrackDebugInfo &visionInfoOut);

    bool isInitialOk();
    bool isSocketOkStatus();
    int modifyVisionTrackConfig(VisionTrackConfig configIn);
    int getUserCoordinateIndex();

    int runVision();//Initialize vision. Cannot detect a network drop once the connection has been established.
    int trigVisionOnce();//Trigger vision once: sends the command only, does not change IO.
    int getVisionData();//Store received vision points into registers.
    int clearVisionData();

    int loopCommontimeOnce();//Camera data send/receive; runs in the non-real-time loop.
    int loopRealtimeOnce(bool detectSiIn, bool &digitalOutput, bool &isDoChangeOut);//Real-time loop: updates the user coordinate frame (from the tracked point's latched encoder value and the current encoder value).

signals:
    // Internal TCP-worker signals, presumably connected to the matching
    // private slots below so socket work runs in the socket's thread
    // (connects live in the .cpp — verify).
    void tcpStart_client_signal(const QString& ip, int port, bool forceReconnect);
    void tcpRead_client_signal(const QString& ip,int port,QByteArray& dataOut);
    // NOTE(review): "clien" is a typo for "client". Renaming requires updating
    // every connect()/emit site in the .cpp, so it is only flagged here.
    void tcpWrite_clien_signal(const QString &ip, int port,const QByteArray &dataOut);

private slots:
    void tcpStart_client_slot(const QString& ip, int port, bool forceReconnect);
    void tcpRead_Client_slot(const QString& ip,int port,QByteArray &dataOut);
    void tcpWrite_Client_slot(const QString& ip,int port,const QByteArray &dataIn);
//    void sendData_slot(QTcpSocket *socketIn, const QByteArray &dataIn, bool &sendStatus);
    void socketError_slot(QAbstractSocket::SocketError errorNum);
//    void newConnectionSlot();
private:
    int sendRobotStatusInfo();
    int checkVisionResponceDifference();      // NOTE(review): "Responce" is a typo for "Response" (rename needs .cpp update)
    int getLatchValue(int &valueOut);
    int recordLatchValue();
    int detectWorkPieceLength(bool detectSiIn, double &detectLengthOut);
    int setOutPut(int bitIndex, bool isHigh, unsigned short &outPutValue);
    // Thread-affine TCP primitives driven through the signal/slot pairs above.
    int tcpStart_client(const QString& ip, int port, bool forceReconnect);
    int tcpWrite_client(const QString& ip, int port,const QByteArray& dataIn);
    int tcpRead_client(const QString& ip, int port,QByteArray& dataOut);
    // Camera reply parsing: 'protocol' selects the 3-value or keba format;
    // each format is decomposed into frame -> one-trigger -> single-object parsers.
    int processReceiveData(int protocol, const QByteArray& dataIn, QVector<QVector<CameraReturnInfo> > &resultOut);
    int processReceiveData_3value(const QByteArray& dataIn, QVector<QVector<CameraReturnInfo> > &resultOut);
    int processReceiveData_3value_oneTime(const QString &dataIn, QVector<CameraReturnInfo> &resultOut);
    int processReceiveData_3value_single(QString tmp_frame, CameraReturnInfo &resultOut);
    int processReceiveData_keba(const QByteArray& dataIn, QVector<QVector<CameraReturnInfo>> &resultOut);
    int processReceiveData_keba_oneTime(const QString &dataIn, QVector<CameraReturnInfo> &resultOut);
    int processReceiveData_keba_single(QString tmp_frame, CameraReturnInfo &resultOut);

    // Convert one camera-frame result into a robot-frame vision object.
    int translateCameraPointToRobotPoint(const CameraReturnInfo &cameraPointIn,VisionObjectInfo &pointOut);

    int connectSensorServer(QString ip,int port);
//    int initialConfigFile(QString configFileName);
    // Report a message/alarm (presumably via motionMessage — verify in the .cpp).
    void addMsg(int messageLevel, std::string componentName, std::string messageType, int messageCode
                , int robotIdIn, int parameter1=0, int parameter2=0,QString paraStr="");

private:
    int visionId;
    bool isInitialOkStatus;
    // NOTE(review): volatile is not a thread-synchronization primitive in C++;
    // if these flags are shared between the real-time and common-time loops,
    // std::atomic<bool>/<int> would be the correct tool — confirm threading model.
    volatile bool isConnectedStatus;
    int socketErrorCode;

    RegData *regData;                 // non-owning (see constructor notes)
    VisionTrackConfig visionConfig;
    BeltTrack *beltTrack;             // NULL for fixed-point vision
//    CoordinateManager *coordinateManager;
    MotionMessage *motionMessage;     // non-owning (see constructor notes)
    int robotId;
    QMap <QString, QTcpSocket*> tcpClientList;   // open client sockets, keyed by QString (presumably "ip:port" — verify)
    QQueue<VisionObjectInfo> visionPointBuffer;  // translated vision points awaiting transfer to registers
    QMutex usingMutex;
    // Handshake flags for the signal/slot TCP round-trips above.
    bool startFinishedStatus;
    bool readFinishedStatus;
    bool writeFinishedStatus;

//    volatile unsigned short digitalOut;

    CameraReturnInfo lastReceiveData_camera;
    double deltaTime;                 // cycle period passed to the constructor
    int currentEncoderValue_compare;

    KDL::Frame *frame_visionlink_in_userlink;    // presumably the vision-link pose expressed in the user link — verify
    volatile bool isCameraNeedResponse;
    volatile int noResponseCount;
    volatile int visionResponseDifference;
//    int currentEncoderValue;
//    int currentEncoderLatchValue;
    VisionTrackDebugInfo debugInfo;
    bool isAutoTrig;
    double trigTimeOrDistance_offset;
    QQueue<int> currentEncoderLatchValueList;    // encoder values latched per trigger, consumed when camera data arrives — TODO confirm
    int debugFlag;
    int trigTimeCount;
    int trigStage;

    bool tmpSiOld;
    int tmpTrigStage;
    int encoderValueForPieceLength;
    bool isInWork;
    int tickCount;

};

#endif // VISIONTRACK_H
