# Trace every command as it runs (debug aid for this whole benchmark script).
set -x

# Submit the Kafka streaming PySpark job (1712.py) on a local 4-core master.
# Broker address, mode ("subscribe") and topic are passed as app arguments.
fetch(){
   # Earlier variants, kept for reference:
   #   rm -rf output
   #   spark-submit \
   #      --packages com.databricks:spark-xml_2.12:0.12.0,mysql:mysql-connector-java:5.1.49 \
   #      --master local[4] 32.py
   #   --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2,com.databricks:spark-xml_2.12:0.12.0,mysql:mysql-connector-java:5.1.49
   local pkgs=org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2,mysql:mysql-connector-java:5.1.49
   spark-submit \
      --packages "$pkgs" \
      --master local[4] 1712.py \
      172.16.0.59:9092 subscribe topic1
}
# Build the "many little files" benchmark fixture: one directory per size,
# each holding that many copies of books.xml (named 0.xml .. n-1.xml).
# Sizes may be passed as arguments; with none, the original default applies.
prepare_little_file(){
   #arr=(10000 20000 50000 100000)
   local arr=("$@")
   [ ${#arr[@]} -gt 0 ] || arr=(10)

   local k i
   for k in "${arr[@]}"; do
      rm -rf "$k"
      mkdir -p "$k"
      for ((i=0; i<k; i++)); do
         # copy straight into the target dir — no temp file in $PWD needed
         cp books.xml "$k/$i.xml"
      done
   done
}

# Build the "one large file" benchmark fixture: for each size k, write k.xml
# as head.xml + k copies of body.xml + tail.xml.  Sizes may be passed as
# arguments; with none, the original default list applies.
prepare_large_file(){
   local arr=("$@")
   [ ${#arr[@]} -gt 0 ] || arr=(10000 20000 50000 100000)

   local k i
   for k in "${arr[@]}"; do
      rm -f "$k.xml"
      # one grouped redirection instead of re-opening the file with >> on
      # every iteration (up to 100k append opens in the original)
      {
         cat head.xml
         for ((i=0; i<k; i++)); do
            cat body.xml
         done
         cat tail.xml
      } > "$k.xml"
   done
}

# Open an interactive spark-shell with the XML reader and MySQL connector.
sshell(){
   # Historical package combinations, kept for reference:
   #   spark-shell --packages com.databricks:spark-xml_2.11:0.12.0
   #   spark-shell --packages com.databricks:spark-xml_2.12:0.12.0,org.json4s:json4s-xml_2.12:4.0.1
   #   spark-shell --packages com.databricks:spark-xml_2.12:0.12.0,org.json4s:json4s-native_2.12:4.0.2
   #      --packages com.databricks:spark-xml_2.12:0.12.0,org.json4s:json4s-jackson_2.12:4.0.3,org.json4s:json4s-xml_2.12:4.0.1
   local pkgs=com.databricks:spark-xml_2.12:0.12.0,mysql:mysql-connector-java:5.1.49
   spark-shell --packages "$pkgs"
}

# Remove every build artifact: generated XML fixtures, spark output,
# and the sbt target/project directories.
clean(){
   # let sbt drop its compiled classes first
   sbt clean
   # generated test fixtures
   rm -f *.xml
   # job output and sbt metadata
   rm -rf output target project
}

# Force-kill every process whose command line mentions "zookeeper".
zoo_off(){
   local pid
   # pgrep -f matches the full command line, replacing ps|grep|grep -v grep
   pid=$(pgrep -f zookeeper)
   # guard: kill with no arguments errors out when nothing matched
   if [ -n "$pid" ]; then
      # may hold several PIDs — intentionally unquoted for word-splitting
      kill -9 $pid
   fi
}

# Start ZooKeeper, then a Kafka broker, both detached with logs in 1.log
# and 2.log.  clear() (the function defined below in this file, which
# shadows clear(1)) wipes previous state first.
kafka_daemon(){
   clear

   nohup ./bin/zookeeper-server-start.sh ./config/zookeeper.properties >1.log 2>&1 &
   # give ZooKeeper a head start before the broker tries to connect
   sleep 1
   nohup ./bin/kafka-server-start.sh ./config/server.properties >2.log 2>&1 &
}

# Force-kill every process whose command line mentions "kafka".
kafka_off(){
   local pid
   # pgrep -f matches the full command line, replacing ps|grep|grep -v grep
   pid=$(pgrep -f kafka)
   # guard: kill with no arguments errors out when nothing matched
   if [ -n "$pid" ]; then
      # may hold several PIDs — intentionally unquoted for word-splitting
      kill -9 $pid
   fi
}

# Reset helper: empties the demo tables in MySQL.
# NOTE(review): the name shadows the clear(1) terminal command; kafka_daemon()
# above calls this function, not the binary.
clear(){
   # Disabled block (no-op here-doc): former kafka/zookeeper shutdown and
   # on-disk log cleanup.  NOTE(review): the delimiter is unquoted, so the
   # embedded backtick substitutions still execute when this runs.
   :<<EOF
   ./bin/kafka-server-stop.sh
   ./bin/zookeeper-server-stop.sh

   pid=`ps -ef|grep kafka |grep -v grep|awk '{print $2}'`
   kill -9 $pid

   pid=`ps -ef|grep zookeeper |grep -v grep|awk '{print $2}'`
   kill -9 $pid
   rm -rf logs nohup.out /tmp/kafka-logs /tmp/zookeeper *.log
EOF

   # Read DB credentials from application.properties (duplicates prop()).
   dbUser=`cat application.properties |grep 'database.user'|grep -v grep|awk -F "=" '{print $2}'`
   password=`cat application.properties |grep 'database.password'|grep -v grep|awk -F "=" '{print $2}'`
   schema=`cat application.properties |grep 'database.schema'|grep -v grep|awk -F "=" '{print $2}'`

   # Empty all four dc_* tables for a fresh run.
   mysql -u $dbUser --password=$password <<EOF
use $schema
delete from dc_message_log;
delete from dc_order_list;
delete from dc_order_head;
delete from dc_ent_info;
\q
EOF
}

# Create the single-partition, single-replica benchmark topic
# (legacy --zookeeper style of the kafka-topics CLI).
create_topic(){
   kafka-topics.sh --create --zookeeper localhost:2181 \
      --replication-factor 1 --partitions 1 --topic topic1
}

# Interactive console producer for topic1 (reads messages from stdin).
produce(){
   local kafka_bin=/usr/local/kafka/bin
   "$kafka_bin"/kafka-console-producer.sh --broker-list localhost:9092 --topic topic1
}

# Flatten 10.xml onto a single line (1.xml) and publish it to topic1.
put(){
   # strip CR and LF in one tr pass, reading the file directly
   # (was: cat | tr -d '\n' | tr -d '\r' — three processes for one job)
   tr -d '\n\r' < 10.xml > 1.xml
   # Disabled variant.  It previously lived in an unquoted no-op here-doc,
   # which still executed the embedded `date` substitution as a side effect:
   #   rm -rf input
   #   mkdir input
   #   mv 1.xml input/$(date +%s)
   /usr/local/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic topic1 <1.xml
}

# Interactive console consumer for topic1 (prints messages to stdout).
consumer(){
   local kafka_bin=/usr/local/kafka/bin
   "$kafka_bin"/kafka-console-consumer.sh --bootstrap-server localhost:9092  --topic topic1
}

# Full rebuild-and-run: drop compiled output, repackage with sbt,
# then hand off to boot() to submit the fresh jar.
reboot(){
   rm -rf target
   sbt package
   boot
}

# Swap which of Put.scala / DirectKafkaWordCount.scala is active by parking
# one as *.bk and restoring the other, so sbt compiles exactly one main.
toggle(){
   # subshell keeps the caller's cwd without the save/restore dance
   (
      # guard: the original renamed files in the WRONG directory if cd failed
      cd src/main/scala || exit 1
      if [ -f "Put.scala" ]; then
         mv Put.scala Put.scala.bk
         mv DirectKafkaWordCount.scala.bk DirectKafkaWordCount.scala
      else
         mv Put.scala.bk Put.scala
         mv DirectKafkaWordCount.scala DirectKafkaWordCount.scala.bk
      fi
      ls -l
   )
}

# Run the sbt-built streaming jar detached under nohup, then follow its log.
daemon(){
   # Disabled block: alternative package sets and app arguments, kept
   # for reference in a no-op here-doc.
   :<<EOF
         --packages com.databricks:dbutils-api_2.12:0.0.5
         --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2,com.databricks:spark-xml_2.12:0.12.0,mysql:mysql-connector-java:5.1.49
   "172.16.0.59:9092" "1" "topic1"
EOF

   # main class name = 4th "/."-separated field of src/main/scala/<Name>.scala
   MAIN_CLASS=`find src -type f -name '*.scala'|xargs grep -l 'def main'|awk -F '[/.]' '{print $4}'`
   if [ ! -n "$MAIN_CLASS" ];then
      # NOTE(review): exit -1 reports status 255 and terminates the whole shell
      exit -1
   fi
   ARTIFACT=`find target/scala-* -name '*.jar'`
   if [ -n "$ARTIFACT" ];then
      # detach the job; "100k.xml" is the application's input argument
      nohup spark-submit \
         --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2,mysql:mysql-connector-java:5.1.49,org.apache.spark:spark-streaming-kafka-0-10_2.12:3.1.2,com.databricks:spark-xml_2.12:0.12.0 \
         --class "$MAIN_CLASS" --master local[*] $ARTIFACT "100k.xml" &
      # blocks forever, streaming the job's log to the terminal
      tail -f nohup.out
   fi
}

# Print the "Finished task ... ms" timing lines from a numbered run log.
# $1 - log suffix: reads nohup.out-$1
sum(){
   # grep reads the file directly (no useless cat); filename quoted
   grep "Finished task.\+ms" "nohup.out-$1"
}

# Run the prebuilt 1931 jar's "Input" main class once, teeing its output
# into nohup.out, then close out the DB log row and drop scratch dirs.
# NOTE(review): relies on globals set elsewhere — $merge_dir/$cache_dir from
# prop(), and $oid/$dbUser/... consumed by log() — normally provided because
# trial() calls fastmer() first; confirm before calling dist() standalone.
dist(){

   ARTIFACT=`find . -maxdepth 1 -type f -name '1931_2.12-1.1.jar'`
   if [ -n "$ARTIFACT" ];then
      echo "Launching">>nohup.out
      date '+%Y-%m-%d %H:%M:%S'>> nohup.out
      spark-submit \
         --packages mysql:mysql-connector-java:5.1.49,com.databricks:spark-xml_2.12:0.12.0 \
         --class "Input" --master local[*] $ARTIFACT | tee -a nohup.out

   fi

   # record END_TIME / TOTAL_TIME / AVG_TIME for the current $oid
   log
   
   # per-run scratch directories (rm -f'ish no-op if the globals are unset)
   rm -rf $merge_dir $cache_dir

}

# Locate the single main class under src/, submit the packaged jar with
# spark-submit, tee its output into nohup.out, then close out the DB log row.
# NOTE(review): log() and $merge_dir depend on prop()/fastmer() having run
# first (see launch()); confirm before calling boot() standalone.
boot(){

   # main class name = 4th "/."-separated field of src/main/scala/<Name>.scala
   MAIN_CLASS=`find src -type f -name '*.scala'|xargs grep -l 'def main'|awk -F '[/.]' '{print $4}'`
   if [ ! -n "$MAIN_CLASS" ];then
      # NOTE(review): exit -1 reports status 255 and terminates the whole shell
      exit -1
   fi
   ARTIFACT=`find target/scala-* -name '*.jar'`
   if [ -n "$ARTIFACT" ];then
      echo "Launching">>nohup.out
      date '+%Y-%m-%d %H:%M:%S'>> nohup.out
      spark-submit \
         --packages mysql:mysql-connector-java:5.1.49,com.databricks:spark-xml_2.12:0.12.0 \
         --class "$MAIN_CLASS" --master local[*] $ARTIFACT | tee -a nohup.out

   fi

   # record END_TIME / TOTAL_TIME / AVG_TIME for the current $oid
   log

   # per-run scratch directory
   rm -rf $merge_dir

}

# Close out the current run's dc_message_log row: stamp END_TIME, then derive
# TOTAL_TIME (seconds) and AVG_TIME (per message).  No-op unless the global
# $oid was set (by fastmer/merge); also expects $dbUser/$password/$schema
# from prop().
log(){
   if [ -n "$oid" ]; then
      mysql -u $dbUser --password=$password <<EOF
use $schema
update dc_message_log set END_TIME = '`date '+%Y-%m-%d %H:%M:%S'`' where oid = '$oid'; 
update dc_message_log set TOTAL_TIME = UNIX_TIMESTAMP(END_TIME)-UNIX_TIMESTAMP(BEGIN_TIME) where oid = '$oid';
update dc_message_log set AVG_TIME = TOTAL_TIME/MSG_COUNT where oid = '$oid';
\q
EOF
   fi
   
}

# One benchmark round against the prebuilt jar: fast-merge the staged
# input files, then run dist().
trial(){
   # merge() is the slower per-file variant; fastmer is preferred here
   fastmer
   dist
}

# Standard run: fast-merge the staged input, then build/submit via boot().
launch(){
   fastmer
   boot
}

# Load runtime settings from application.properties into the global
# variables used throughout this script (merge, fastmer, input, boot, ...).
prop(){
   # _prop_val PATTERN — print the value(s) of matching KEY=VALUE lines.
   # grep reads the file directly; the original piped through cat and a
   # meaningless `grep -v grep` (a copy-paste artifact from ps pipelines).
   _prop_val(){
      grep "$1" application.properties | awk -F '=' '{print $2}'
   }

   # intentionally global — consumed by the other functions
   dbUser=$(_prop_val 'database.user')
   password=$(_prop_val 'database.password')
   schema=$(_prop_val 'database.schema')
   cache_dir=$(_prop_val 'cache.dir')
   input_dir=$(_prop_val 'xml.input.dir')
   output_dir=$(_prop_val 'xml.output.dir')
   merge_dir=$(_prop_val 'xml.merge.dir')
}

# fast merge
fastmer(){
   prop

   mkdir -p $cache_dir $input_dir $output_dir $merge_dir

   oid=`date +%Y%m%d''%H%M%S%N | cut -b 1-23`
   cache_sql="${cache_dir}/`date "+%Y%m%d%H%M%S"`.sql"

   msgCount=`find $input_dir -type f -name '*.xml' |wc -l`
   cat <<EOF >>$cache_sql
use $schema
insert into dc_message_log(OID, BEGIN_TIME) values('$oid', '`date '+%Y-%m-%d %H:%M:%S'`');
update dc_message_log set MSG_COUNT = $msgCount where oid = '$oid'; 
EOF

   java -cp mergef-1.jar xxx.MainApp

   nohup mysql -u $dbUser --password=$password $schema <$cache_sql &

   cat <<EOF >$cache_dir/1.sh
   set -x
   rm -rf $cache_dir/input
   mv $input_dir $cache_dir/input
   mkdir -p $input_dir
   for f in \`find "$cache_dir/input" -type f -name '*.xml'\`; do mv \$f $output_dir 1>/dev/null 2>&1; done
EOF
   chmod +1 $cache_dir/1.sh
   nohup $cache_dir/1.sh &

   echo "Merged">>nohup.out
   echo `date '+%Y-%m-%d %H:%M:%S'`>>nohup.out
}

# Slow merge: concatenate every staged input xml into one timestamped merge
# file, archiving originals to the output dir, and batch per-file rows into
# a SQL script that is replayed asynchronously.  Sets the global $oid that
# log() later uses.
merge(){

   prop

   # operation id: timestamp truncated to 23 chars (millisecond precision)
   oid=`date +%Y%m%d''%H%M%S%N | cut -b 1-23`
   cache_sql="${cache_dir}/`date "+%Y%m%d%H%M%S"`.sql"

   cat <<EOF >>$cache_sql
use $schema
insert into dc_message_log(OID, BEGIN_TIME) values('$oid', '`date '+%Y-%m-%d %H:%M:%S'`');
EOF

   echo "Merging">>nohup.out
   echo `date '+%Y-%m-%d %H:%M:%S'`>> nohup.out

   merge_file_name=`date "+%Y%m%d%H%M%S"`
   merge_file="${merge_dir}/${merge_file_name}.xml"

   mkdir -p $merge_dir $output_dir $cache_dir
   touch $merge_file

   i=0
   for f in `find "$input_dir" -type f -name '*.xml'`; do
      i=`expr $i + 1`
      # append, then archive the source only if the append succeeded
      cat $f >> $merge_file && mv $f $output_dir 1>/dev/null 2>&1

      # log the bare file name under a fresh 32-char (dashless) uuid
      fn=`echo $f|awk -F '/' '{print $NF}'`
      cat <<EOF >>$cache_sql
insert into dc_message_data(OID, FILE_NAME) values('`uuidgen -r|sed 's/-//g'`', '$fn');
EOF
      
   done
   msgCount=$i

   cat <<EOF >>$cache_sql
update dc_message_log set MSG_COUNT = $msgCount where oid = '$oid'; 
EOF
   # replay the accumulated SQL asynchronously
   nohup mysql -u $dbUser --password=$password $schema <$cache_sql &

   echo "Merged">>nohup.out
   echo `date '+%Y-%m-%d %H:%M:%S'`>>nohup.out
   #rm -rf $cache_dir
}

# Stage a new batch: move up to $1 (default 100) xml files from the output
# dir back into the input dir.
input(){
   prop

   mkdir -p $output_dir $input_dir

   local nbr=${1:-100}
   local i=0

   for f in `find $output_dir -type f -name '*.xml'`; do
      # stop BEFORE exceeding the requested count — the original checked
      # AFTER incrementing and therefore moved nbr+1 files
      if [ $i -ge $nbr ]; then
         break
      fi
      mv $f $input_dir
      i=$((i + 1))
   done
}

# Simulate consumption: move up to $1 (default 7000, matching the original
# hard-coded limit) xml files from the input dir back to the output dir
# without running the real job.
fake(){
   prop

   mkdir -p $output_dir $input_dir

   local nbr=${1:-7000}
   local i=0

   for f in `find $input_dir -type f -name '*.xml'`; do
      # stop BEFORE exceeding the limit — the original checked AFTER
      # incrementing and therefore moved limit+1 files
      if [ $i -ge $nbr ]; then
         break
      fi
      mv $f $output_dir
      i=$((i + 1))
   done
}

# Point the /usr/local/{scala,spark} symlinks and the local build.sbt link
# at the Scala 2.11 / Spark 2.1 toolchain.
downgrade(){
   rm -f /usr/local/scala /usr/local/spark build.sbt

   ln -s /usr/local/scala-2.11.8              /usr/local/scala
   ln -s /usr/local/spark-2.1.1-bin-hadoop2.3 /usr/local/spark
   ln -s build.sbt.2.11                       build.sbt
}

# Point the /usr/local/{scala,spark} symlinks and the local build.sbt link
# at the Scala 2.12 / Spark 3.1 toolchain.
upgrade(){
   rm -f /usr/local/scala /usr/local/spark build.sbt

   ln -s /usr/local/scala-2.12.14 /usr/local/scala
   ln -s /usr/local/spark-3.1.2   /usr/local/spark
   ln -s build.sbt.2.12           build.sbt
}

# Dispatch: the script's first argument names the function to run and the
# rest are passed through, e.g. `./script.sh input 50`.  Quoted so that
# arguments containing spaces survive word-splitting (unquoted $@ split them).
"$@"
