// project home: cp3-cg.googlecode.com

#include "myvectormath.h"
#include "global.h"
#include <iostream>
#include <cmath>
#include <omp.h>
using namespace std;

vector<Double_t> matrixvectorproduct(vector<Double_t> *A, vector<Int_t> *JA, vector<Int_t> *IA, vector<Double_t> *x, bool debug){
  // Sparse matrix-vector product b = A*x in compressed-row (CRS/Yale) storage.
  //   A  : nonzero values, row by row
  //   JA : column index of each value in A
  //   IA : IA[r]..IA[r+1]-1 is the index range of row r inside A/JA
  //   x  : input vector (its length defines the output dimension)
  // debug prints the dimension and the visited indices to stdout.
  const Int_t dim = x->size();
  if (debug){cout << "dimension: " << dim << endl;}
  vector<Double_t> b(dim, 0); // result vector, zero-initialized

  for (int row = 0; row < dim; row++){
    if (debug){cout << "j=" << row << ";" << endl;}
    // accumulate the row locally, then store once
    Double_t acc = b.at(row);
    const int rowEnd = IA->at(row + 1);
    for (int idx = IA->at(row); idx < rowEnd; idx++){
      if (debug){cout << "i=" << idx << ";" << endl;}
      acc += A->at(idx) * x->at(JA->at(idx));
    }
    b.at(row) = acc;
  }

  return b;
}

void Laplaceproduct(Int_t n, vector<Double_t>  *vecin, vector<Double_t>  *vecout){
// Applies the lattice Laplace operator plus mass term to vecin:
//   vecout[i] = (2*ndim + mass^2)*vecin[i] - sum over directions k of
//               (vecin[nn[k][i]] + vecin[nn[k+ndim][i]])
// using the global neighbor table nn and the globals ndim, mass.
// NOTE(review): parameter n is unused in this body — kept for interface
// compatibility with existing callers.
// this operation uses approx. (4+2*ndim)*nvol flops
  int i,k;
  const int N=vecout->size();
  // Loop-invariant diagonal factor hoisted out of the parallel loop;
  // mass*mass replaces pow(mass,2), avoiding a transcendental-function
  // call per lattice site with identical result.
  const Double_t diagonal = 2*ndim + mass*mass;
 #pragma omp parallel for shared (vecout, ndim, diagonal, vecin, nn) private (i, k)
  for (i=0;i<N;i++){
    vecout->at(i)=diagonal*vecin->at(i);
    for (k=1;k<=ndim;k++){
      // subtract forward (k) and backward (k+ndim) neighbors in direction k
      vecout->at(i)=vecout->at(i)-(vecin->at(nn[k][i])+vecin->at(nn[k+ndim][i]));
    }
  }
}








Double_t skalarp(vector<Double_t> *x, vector<Double_t> *y){
  // Parallel scalar (dot) product <x,y>, reduced across threads.
  // this operation uses approx. 2*nvol flops
  const int n = x->size();
  Double_t sum = 0;
  int idx;
  #pragma omp parallel for shared (x, y) private (idx) reduction(+: sum) //
  for (idx = 0; idx < n; idx++){
    sum += x->at(idx) * y->at(idx);
  }
  return sum;
}
Double_t skalarp_nop(vector<Double_t> *x, vector<Double_t> *y){
  // Serial (non-OpenMP) scalar product <x,y>; reference version of skalarp.
  // this operation uses approx. 2*nvol flops
  Double_t sum = 0;
  const int n = x->size();
  for (int idx = 0; idx < n; ++idx){
    sum += x->at(idx) * y->at(idx);
  }
  return sum;
}






void vectorsub(vector<Double_t> *x, vector<Double_t> *y, vector<Double_t> *vecout){
  // Parallel element-wise difference: vecout = x - y.
  // this operation uses approx. nvol flops
  const int n = x->size();
  vecout->assign(n, 0); // size vecout to match x
  int idx;
  #pragma omp parallel for shared (vecout, x, y) private (idx)
  for (idx = 0; idx < n; idx++){
    vecout->at(idx) = x->at(idx) - y->at(idx);
  }
}
void vectorsub_nop(vector<Double_t> *x, vector<Double_t> *y, vector<Double_t> *vecout){
  // Serial (non-OpenMP) element-wise difference: vecout = x - y.
  // this operation uses approx. nvol flops
  const size_t n = x->size();
  vecout->assign(n, 0); // size vecout to match x
  for (size_t idx = 0; idx < n; ++idx){
    vecout->at(idx) = x->at(idx) - y->at(idx);
  }
}

void vectoradd(vector<Double_t> *x, vector<Double_t> *y, vector<Double_t> *vecout){
  // Parallel element-wise sum: vecout = x + y.
  // this operation uses approx. nvol flops
  vecout->assign(x->size(), 0); // size vecout to match x
  const int n = x->size();
  int idx;
  #pragma omp parallel for shared (vecout, x, y) private (idx)
  for (idx = 0; idx < n; idx++){
    vecout->at(idx) = x->at(idx) + y->at(idx);
  }
}
void vectoradd_nop(vector<Double_t> *x, vector<Double_t> *y, vector<Double_t> *vecout){
  // Serial (non-OpenMP) element-wise sum: vecout = x + y.
  // this operation uses approx. nvol flops
  vecout->assign(x->size(), 0); // size vecout to match x
  for (size_t idx = 0; idx < x->size(); ++idx){
    vecout->at(idx) = x->at(idx) + y->at(idx);
  }
}



void VectorMultiS(vector<Double_t> *x, Double_t alpha, vector<Double_t> *vecout){
  // Parallel scalar multiplication: vecout = alpha * x.
  // this operation uses approx. nvol flops
  vecout->assign(x->size(), 0); // size vecout to match x
  const int n = x->size();
  int idx;
  #pragma omp parallel for shared (vecout, x, alpha) private (idx)
  for (idx = 0; idx < n; idx++){
    vecout->at(idx) = x->at(idx) * alpha;
  }
}
void VectorMultiS_nop(vector<Double_t> *x, Double_t alpha, vector<Double_t> *vecout){
  // Serial (non-OpenMP) scalar multiplication: vecout = alpha * x.
  // this operation uses approx. nvol flops
  vecout->assign(x->size(), 0); // size vecout to match x
  for (size_t idx = 0; idx < x->size(); ++idx){
    vecout->at(idx) = x->at(idx) * alpha;
  }
}




