﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using MPI;

namespace ParallelLib.UseMPI
{
    /// <summary>
    /// Demonstrates the common MPI.NET communication patterns.
    ///
    /// Point-to-point:
    ///     Send:    Communicator.world.Send
    ///     Receive: Communicator.world.Receive
    ///     Sync:    Communicator.world.Barrier()
    /// Gather (all-to-one):
    ///     Communicator.world.Gather()
    ///     Communicator.world.Allgather()  -- every process receives a copy of the result.
    /// Broadcast (one-to-all):
    ///     Communicator.world.Broadcast()
    ///     Communicator.world.Scatter()    -- the root sends a different piece of data to each node.
    /// All-to-all:
    ///     Communicator.world.Alltoall()   -- every pair of nodes exchanges data, including a node with itself.
    /// Reduce (combining operation):
    ///     Communicator.world.Reduce()     -- collects values from all nodes, combines them, and delivers the result to the root.
    /// </summary>
    public class MpiDemo
    {
        /// <summary>
        /// Gathers the processor (host) name of every rank at rank 0 and prints
        /// the sorted list. Ranks other than 0 only contribute their name.
        /// </summary>
        /// <param name="args">Command-line arguments, forwarded to MPI initialization.</param>
        public static void Main_Hostnames(string[] args)
        {
            MPI.Environment.Run(ref args, comm =>
            {
                // Gather delivers the complete array only at the root (rank 0);
                // on every other rank the returned value is unused.
                string[] hostnames = comm.Gather(MPI.Environment.ProcessorName, 0);
                if (comm.Rank == 0)
                {
                    Array.Sort(hostnames);
                    foreach (string host in hostnames)
                        Console.WriteLine(host);
                }
            });
        }

        /// <summary>
        /// Classic "hello world": every rank prints its rank and the total
        /// number of processes in the communicator.
        /// </summary>
        /// <param name="args">Command-line arguments, forwarded to MPI initialization.</param>
        public static void Main_Hello(string[] args)
        {
            MPI.Environment.Run(ref args, communicator =>
            {
                Console.WriteLine("Hello, from process number "
                                         + communicator.Rank + " of "
                                         + communicator.Size);
            });
        }

        /*
        * This example shows how one can use MPI to compute an approximate value
        * for Pi. The basic idea is very simple: consider a 2x2 square circumscribed
        * about a circle of radius 1, centered on the origin. Then, we take a bunch of
        * darts at random and throw them at the square. The ratio of darts that
        * land in the circle to the number of darts thrown is equal to the ratio
        * of the area of the circle to the area of the square. Using this equivalence,
        * we can approximate pi. The more darts we throw, the better our
        * approximation of pi. So, we parallelize this program by having every
        * processor throw darts independently, and then sum up the results at the
        * end to compute pi.
        */

        /// <summary>
        /// Monte-Carlo estimation of Pi. Each rank throws
        /// <c>dartsPerProcessor</c> random darts at the unit circle; the hit
        /// counts are summed at rank 0, which prints the estimate.
        /// </summary>
        /// <param name="args">
        /// Command-line arguments; args[0], if present, overrides the number of
        /// darts thrown per process (default 10,000,000).
        /// </param>
        public static void Main_Pi(string[] args)
        {
            int dartsPerProcessor = 10000000;
            MPI.Environment.Run(ref args, comm =>
            {
                if (args.Length > 0)
                    dartsPerProcessor = Convert.ToInt32(args[0]);

                // Seed deterministically per rank so each process draws an
                // independent dart sequence.
                Random random = new Random(5 * comm.Rank);
                int dartsInCircle = 0;
                for (int i = 0; i < dartsPerProcessor; ++i)
                {
                    double x = (random.NextDouble() - 0.5) * 2;
                    double y = (random.NextDouble() - 0.5) * 2;
                    if (x * x + y * y <= 1.0)
                        ++dartsInCircle;
                }

                // Reduce in 64-bit: with the default 10,000,000 darts per rank,
                // an int total overflows once Size * dartsPerProcessor exceeds
                // int.MaxValue (~215 ranks), and C# arithmetic is unchecked.
                long totalDartsInCircle = comm.Reduce((long)dartsInCircle, Operation<long>.Add, 0);
                if (comm.Rank == 0)
                {
                    // Cast before multiplying so the total-darts product is
                    // computed in double rather than overflowing Int32.
                    Console.WriteLine("Pi is approximately {0:F15}.",
                        4.0 * totalDartsInCircle / ((double)comm.Size * dartsPerProcessor));
                }
            });
        }

        /// <summary>
        /// Liveness check: rank 0 pings every other rank (tag 0) and each rank
        /// replies with its host name (tag 1), which rank 0 prints.
        /// Requires at least two processes to do anything useful.
        /// </summary>
        /// <param name="args">Command-line arguments, forwarded to MPI initialization.</param>
        public static void Main_PingPong(string[] args)
        {
            MPI.Environment.Run(ref args, comm =>
            {
                if (comm.Rank == 0)
                {
                    Console.WriteLine("Rank 0 is alive and running on " + MPI.Environment.ProcessorName);
                    for (int dest = 1; dest < comm.Size; ++dest)
                    {
                        Console.Write("Pinging process with rank " + dest + "...");
                        comm.Send("Ping!", dest, 0);
                        string destHostname = comm.Receive<string>(dest, 1);
                        Console.WriteLine(" Pong!");
                        Console.WriteLine("  Rank " + dest + " is alive and running on " + destHostname);
                    }
                }
                else
                {
                    // Wait for the ping (tag 0), then answer with our host name (tag 1).
                    comm.Receive<string>(0, 0);
                    comm.Send(MPI.Environment.ProcessorName, 0, 1);
                }
            });
        }

        /*
        * This example program passes a message around a ring of processes,
        * where each processor adds its rank to the message string.
        */

        /// <summary>
        /// Passes a message around a ring of processes: rank 0 starts the
        /// message, each rank appends its own rank and forwards it to its
        /// right neighbor, and rank 0 finally prints the accumulated string.
        /// </summary>
        /// <param name="args">Command-line arguments, forwarded to MPI initialization.</param>
        public static void Main_Ring(string[] args)
        {
            MPI.Environment.Run(ref args, comm =>
            {
                if (comm.Size < 2)
                {
                    // Our ring needs at least two processes
                    Console.WriteLine("The Ring example must be run with at least two processes.");
                    Console.WriteLine("Try: mpiexec -np 4 ring.exe");
                }
                else if (comm.Rank == 0)
                {
                    // Rank 0 initiates communication around the ring
                    string data = "Hello, World!";

                    // Send "Hello, World!" to our right neighbor
                    comm.Send(data, (comm.Rank + 1) % comm.Size, 0);

                    // Receive data from our left neighbor (blocks until the
                    // message has travelled the whole ring)
                    comm.Receive((comm.Rank + comm.Size - 1) % comm.Size, 0, out data);

                    // Add our own rank and write the results
                    data += " 0";
                    Console.WriteLine(data);
                }
                else
                {
                    // Receive data from our left neighbor
                    string data;
                    comm.Receive((comm.Rank + comm.Size - 1) % comm.Size, 0, out data);

                    // Add our own rank to the data
                    data = data + " " + comm.Rank.ToString() + ",";

                    // Pass on the intermediate to our right neighbor
                    comm.Send(data, (comm.Rank + 1) % comm.Size, 0);
                }
            });
        }

    }
}
