#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：Employment_class 
@File    ：14_多线程与多进程复习.py
@IDE     ：PyCharm 
@Author  ：XiaoDengYa
@Date    ：2021/6/10/010 17:15 
"""

"""
fork 创建多进程
"""
#
# import os
# import time
#
# ret = os.fork()
# if ret == 0: # 子进程的返回值是0
#     while True:
#         print("----1---")
#         time.sleep(1)
# else: # 主进程返回非0,
#     while True:
#         print("----2---")
#         time.sleep(1)

"""
from multiprocessing import Process
# 创建进程
"""
# from multiprocessing import Process
#
# def demo():
#     while True:
#         print("-------test-------")
#         time.sleep(1)
#
# if __name__ == '__main__':
#     p = Process(target=demo)
#     p.start()
#
#     while True:
#         print("——————main——————")
#         time.sleep(1)

# import multiprocessing
# import time
#
# def worker(interval):
#     n = 5
#     while n > 0:
#         print("The time is {0}".format(time.ctime()))
#         time.sleep(interval)
#         n -= 1
#
# if __name__ == "__main__":
#     p = multiprocessing.Process(target = worker, args = (3,))
#     p.daemon =True # 不等待子线程，主进程结束后就退出
#     # 注：因子进程设置了daemon属性，主进程结束，它们就随着结束了。
#     p.start()
#
#     print("p.pid:", p.pid)
#     print("p.name:", p.name)
#     print("p.is_alive:", p.is_alive())

# 设置daemon执行完结束的方法
# import multiprocessing
# import time
#
# def worker(interval):
#     n = 5
#     while n > 0:
#         print("The time is {0}".format(time.ctime()))
#         time.sleep(interval)
#         n -= 1
#
# if __name__ == "__main__":
#     p = multiprocessing.Process(target = worker, args = (3,))
#     p.daemon =True # 不等待子线程，主进程结束后就退出
#     # 注：因子进程设置了daemon属性，主进程结束，它们就随着结束了。
#     p.start()
#     p.join()
#     print("p.pid:", p.pid)
#     print("p.name:", p.name)
#     print("p.is_alive:", p.is_alive())


# 当多个进程需要访问共享资源的时候，Lock可以用来避免访问的冲突。

# import multiprocessing
# import sys
#
#
# def worker_with(lock, f):
#     with lock:
#         fs = open(f, 'a+')
#         n = 10
#         while n > 1:
#             fs.write("Lockd acquired via with\n")
#             n -= 1
#         fs.close()
#
#
# def worker_no_with(lock, f):
#     lock.acquire()
#     try:
#         fs = open(f, 'a+')
#         n = 10
#         while n > 1:
#             fs.write("Lock acquired directly\n")
#             n -= 1
#         fs.close()
#     finally:
#         lock.release()
#
#
# if __name__ == "__main__":
#     lock = multiprocessing.Lock()
#     f = "file.txt"
#     w = multiprocessing.Process(target=worker_with, args=(lock, f))
#     nw = multiprocessing.Process(target=worker_no_with, args=(lock, f))
#     w.start()
#     nw.start()
#     print("end")


# Semaphore用来控制对共享资源的访问数量，例如池的最大连接数。
# import multiprocessing
# import time
#
# def worker(s, i):
#     s.acquire()
#     print(multiprocessing.current_process().name + "acquire")
#     time.sleep(i)
#     print(multiprocessing.current_process().name + "release\n")
#     s.release()
#
# if __name__ == "__main__":
#     s = multiprocessing.Semaphore(2)
#     for i in range(5):
#         p = multiprocessing.Process(target = worker, args=(s, i*2))
#         p.start()

# Event用来实现进程间同步通信。
# import multiprocessing
# import time
#
#
# def wait_for_event(e):
#     print("wait_for_event: starting")
#     e.wait()
#     print("wairt_for_event: e.is_set()->" + str(e.is_set()))
#
#
# def wait_for_event_timeout(e, t):
#     print("wait_for_event_timeout:starting")
#     e.wait(t)
#     print("wait_for_event_timeout:e.is_set->" + str(e.is_set()))
#
#
# if __name__ == "__main__":
#     e = multiprocessing.Event()
#     w1 = multiprocessing.Process(name="block",
#                                  target=wait_for_event,
#                                  args=(e,))
#
#     w2 = multiprocessing.Process(name="non-block",
#                                  target=wait_for_event_timeout,
#                                  args=(e, 2))
#     w1.start()
#     w2.start()
#
#     time.sleep(3)
#
#     e.set()
#     print("main: event is set")


# import multiprocessing
# import time
#
# def proc1(pipe):
#     while True:
#         for i in range(10000):
#             print("send: %s" %(i))
#             pipe.send(i)
#             time.sleep(1)
#
# def proc2(pipe):
#     while True:
#         print("proc2 rev:", pipe.recv())
#         time.sleep(1)
#
# def proc3(pipe):
#     while True:
#         print("PROC3 rev:", pipe.recv())
#         time.sleep(1)
#
# if __name__ == "__main__":
#     pipe = multiprocessing.Pipe()
#     p1 = multiprocessing.Process(target=proc1, args=(pipe[0],))
#     p2 = multiprocessing.Process(target=proc2, args=(pipe[1],))
#     #p3 = multiprocessing.Process(target=proc3, args=(pipe[1],))
#
#     p1.start()
#     p2.start()
#     #p3.start()
#
#     p1.join()
#     p2.join()
#     #p3.join()


# Pool
#coding: utf-8
import multiprocessing
import time

def func(msg):
    """Pool worker: log the incoming message, simulate 3s of work, log completion.

    Args:
        msg: payload handed over by the parent process via apply_async.
    """
    # Identical output to print("msg:", msg) — the default sep is one space.
    print("msg: %s" % (msg,))
    time.sleep(3)  # pretend to do three seconds of work
    print("end")

if __name__ == "__main__":
    # Pool of 3 workers for 4 tasks: the 4th task waits until a worker frees up.
    # Pool supports the context-manager protocol; using `with` guarantees the
    # pool is cleaned up even if an exception occurs. __exit__ calls
    # terminate(), which is harmless here because close()/join() have already
    # run inside the block.
    with multiprocessing.Pool(processes=3) as pool:
        for i in range(4):
            msg = "hello %d" % (i)
            # Non-blocking submit: the task is queued and picked up by a free
            # worker, keeping the number of running processes at `processes`.
            pool.apply_async(func, (msg, ))

        print("Mark~ Mark~ Mark~~~~~~~~~~~~~~~~~~~~~~")
        pool.close()  # no more tasks may be submitted after this point
        pool.join()   # close() must precede join(); waits for all workers to finish
    print("Sub-process(es) done.")