"""递归锁，可以解决死锁的问题
递归锁： 在Python中为了支持在同一线程中多次请求同一资源，python提供了可重入锁RLock。这个RLock内部维护着一个Lock和一个counter变量，counter记录了acquire的次数，从而使得资源可以被多次require。直到一个线程所有的acquire都被release，其他的线程才能获得资源。上面的例子如果使用RLock代替Lock，则不会发生死锁

lock_1 = lock_2 = threading.RLock()： 一个线程拿到锁，counter加1,该线程内又碰到加锁的情况，则counter继续加1，这期间所有其他线程都只能等待，等待该线程释放所有锁，即counter递减到0为止
"""
from concurrent.futures import ThreadPoolExecutor
from threading import Lock, RLock


def thread_run_1(num):
    """Increment the shared ``global_num`` counter ``num`` times.

    Acquires ``lock_1`` then ``lock_2`` (nested). Because the main block binds
    both names to the *same* RLock, the inner acquire merely bumps the RLock's
    recursion counter instead of deadlocking against ``thread_run_2``, which
    acquires in the opposite order.

    :param num: number of increments to perform.
    """
    global global_num
    for _ in range(num):
        # `with` guarantees release even if an exception is raised while the
        # lock is held; the original bare acquire()/release() pairs would
        # leak the lock in that case. Lock/unlock order is unchanged.
        with lock_1:  # acquire, RLock counter -> 1
            global_num += 1
            print("thread_run_1 is locked")
            with lock_2:  # same RLock, counter -> 2
                print("thread_run_1：get the lock_2")
            # inner release, counter -> 1
        # outer release, counter -> 0; other threads may now acquire
    print("In thread 1, the global_num is {}".format(global_num))


def thread_run_2(num):
    """Increment the shared ``global_num`` counter ``num`` times.

    Acquires ``lock_2`` then ``lock_1`` — the reverse order of
    ``thread_run_1``. With two distinct locks this ordering would deadlock;
    because both names refer to one shared RLock, the nested acquire only
    increments the RLock's recursion counter.

    :param num: number of increments to perform.
    """
    global global_num
    for _ in range(num):
        # `with` guarantees release even if an exception is raised while the
        # lock is held; the original bare acquire()/release() pairs would
        # leak the lock in that case. Lock/unlock order is unchanged.
        with lock_2:  # acquire, RLock counter -> 1
            global_num += 1
            print("thread_run_2 is locked")
            with lock_1:  # same RLock, counter -> 2
                print("thread_run_2：get the lock_1")
            # inner release, counter -> 1
        # outer release, counter -> 0; other threads may now acquire
    print("In thread 2, the global_num is {}".format(global_num))


if __name__ == "__main__":
    global_num = 0
    # 解决死锁
    # 这样使用RLock不行，因为这样相当于创建了两个RLock对象
    # lock_1 = RLock()
    # lock_2 = RLock()
    lock_1 = lock_2 = RLock()

    with ThreadPoolExecutor() as executor:
        executor.submit(thread_run_1, 1000000)
        executor.submit(thread_run_2, 100000)
    print("Finally, the global_num is {}".format(global_num))
