;/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
;*  Copyleft (!c) 2004-2005 Div. Nucl. Med., Cyric, Tohoku Univ.    *
;*  Copyleft (!c) 2005-2011 Elseif Laboratory.                      *
;*  elseifkk@users.sf.net                                           *
;*                                                                  *
;*  All Rights Reversed.                                            *
;*                                                                  *
;*                                                                  *
;*  LICENSE AGREEMENT:                                              *
;*                                                                  *
;*  You agree that we don't agree to anything.                      *
;*                                                                  *
;*                                                                  *
;*  DISCLAIMER OF WARRANTIES:                                       *
;*                                                                  *
;*  This software is provided to you "AS IS," and the authors and   *
;*  contributors disclaim any warranty or liability obligations to  *
;*  you of any kind, implied or express, including but not limited  *
;*  to the warranties of merchantability, fitness for a particular  *
;*  purpose, and non-infringement.                                  *
;* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
global pivot_r8_asm__
global pivot_r4_asm__
global pivot_i4_asm__
;;; 
;;; 
;;; ---------------------------------------------------------------
;;; sort %1  --  pivot selection + Hoare-style partition over a
;;; slice a(i:j); expanded as the body of each pivot_*_asm__ entry.
;;; %1 = element size in bytes (4 or 8).
;;;
;;; Fortran-callable, cdecl, ALL arguments passed by reference:
;;;   integer*4 function pivot(a, i, j)
;;;     [esp+4]  -> a(1)  first element of the 1-based array
;;;     [esp+8]  -> i     lower bound of the slice
;;;     [esp+12] -> j     upper bound of the slice
;;;
;;; Elements are compared as raw UNSIGNED bit patterns (high dword
;;; first when %1 = 8).  For IEEE floats that ordering is only
;;; correct for non-negative values -- hence the caller-side rule
;;; "a must not be negative" stated at the entry points.
;;;
;;; Returns in eax:
;;;   l = first index of the right half after partitioning, or
;;;   0 = failure: a(i..j) are all equal, nothing to partition.
;;;
;;; NOTE(review): scratch variables live in .data, so the routine
;;; is neither reentrant nor thread-safe.
%macro sort 1			; %1 = element size in bytes
section .data 
.i dd 0				; saved lower bound i
.j dd 0				; saved upper bound j
.n0 dd 0			; pivot value x, low dword
.n1 dd 0			; unused: hi dword of x stays in ebx
;;; 
section .text
	push edi
	push esi
	push ebx
%assign _P 4*3			; bytes pushed above -> arg offset
	mov ebx, [esp+_P+8]
	mov eax, [ebx]		; eax=i (dereference by-ref arg)
	mov [.i], eax
	mov ebx, [esp+_P+12]
	mov edx, [ebx]		; edx=j
	mov [.j], edx
	mov esi, [esp+_P+4]	; esi=a(1)
	sub esi, %1		; bias: &a(k) = esi + k*%1 (1-based)
	mov ebx, [esi+eax*%1]	; ebx=lo a(i)
%if %1 = 8
	mov edi, [esi+eax*%1+4]	; edi=hi a(i)
%endif 
	mov ecx, eax		; ecx=i (kept in case pivot is a(i))
	inc eax			; eax=k=i+1
;;; pivot scan: do while(k<=j.and.a(i)==a(k));
;;; stop at the first a(k) differing from a(i).
.do_pivot_r8:	
	cmp eax, edx
	ja near .failure_pivot_r8 ; k>j: slice all equal -> fail
%if %1 = 8
	cmp edi, [esi+eax*%1+4]	; cmp hi a(i), a(k): hi word decides
	jb .exit_pivot_r8	; a(i)<a(k): pivot index = k
	jne .next_pivot_r8	; a(i)>a(k): pivot index = i
%endif
	cmp ebx, [esi+eax*%1] 	; cmp lo a(i), a(k): tie-breaker
	jb .exit_pivot_r8	; a(i)<a(k): pivot index = k
	jne .next_pivot_r8	; a(i)>a(k): pivot index = i
	inc eax			; equal: try next k
	jmp .do_pivot_r8
.next_pivot_r8:
	mov eax, ecx		; a(i) is the larger: pivot index = i
.exit_pivot_r8:
;;; partition a(i..j) around x = a(pivot index)
.partition_r8_asm__:	
	lea ebx, [esi+eax*%1]	; ebx = &a(pivot)
	mov edx, [ebx]
	mov [.n0], edx		; .n0 = lo dword of pivot value x
%if %1 = 8
	mov ebx, [ebx+4]	; ebx=n1: hi dword of x, kept in reg
%endif
	mov eax, [.i]		; eax = l, left scan cursor
	mov ecx, [.j]		; ecx = r, right scan cursor
.dododo:
;;; advance l while l<=j and a(l)<x (unsigned, hi dword first)
.do_1:	
	cmp eax, [.j]
	ja .next_1
;;; if(a(l)<x) then
   	lea edi, [esi+eax*%1]	; a(l)
%if %1 = 8
	mov edx, [edi+4]
	cmp edx, ebx		; hi dwords decide unless equal
	ja .next_1
	je .see_1		; hi equal: compare lo dwords
%else
	mov edx, [edi]
	cmp edx, [.n0]
	jae .next_1
%endif
.loop_1:
	inc eax
	jmp .do_1
%if %1 = 8
.see_1:	
	mov edx, [edi]
	cmp edx, [.n0]		; lo dwords break the tie
	jb .loop_1
%endif
.next_1:	
;;; retreat r while r>=i and a(r)>=x
.do_2:
 	cmp ecx, [.i]
	jb .next_2
;;; if(a(r)>=x) then
  	lea edi, [esi+ecx*%1]	; a(r)
%if %1 = 8
	mov edx, [edi+4]
	cmp edx, ebx		; hi dwords decide unless equal
	jb .next_2
	je .see_2		; hi equal: compare lo dwords
%else
	mov edx, [edi]
	cmp edx, [.n0]
	jb .next_2
%endif
.loop_2:	
	dec ecx
	jmp .do_2
%if %1 = 8
.see_2:
	mov edx, [edi]
	cmp edx, [.n0]		; lo dwords break the tie
	jae .loop_2
%endif
.next_2:	
	cmp eax, ecx		; cursors crossed? (l>r)
	ja .exit_1		; yes: done, return l in eax
	mov edi, [esi+ecx*%1]	; swap a(l) <-> a(r): lo dwords
	mov edx, [esi+eax*%1]
	mov [esi+eax*%1], edi
	mov [esi+ecx*%1], edx
%if %1 = 8
	mov edi, [esi+ecx*%1+4]	; ... and hi dwords
	mov edx, [esi+eax*%1+4]
	mov [esi+eax*%1+4], edi
	mov [esi+ecx*%1+4], edx
%endif
	dec ecx			; shrink window and keep
	inc eax			; scanning while l<=r
	cmp eax, ecx
	jbe .dododo
.exit_1:
	pop ebx
	pop esi
	pop edi
	ret			; eax = l (or 0 via failure path)
.failure_pivot_r8:
	xor eax, eax		; all-equal slice: signal with index 0
	jmp .exit_1
%endmacro
;;; integer*4 function pivot_r8(a,i,j) -- 8-byte (real*8) elements.
;;; Partitions a(i:j) in place; returns the split index in eax,
;;; or 0 when a(i:j) are all equal.
;;; a must not be negative! (elements compared as unsigned bits,
;;; which orders IEEE doubles correctly only when non-negative)
pivot_r8_asm__:
sort 8
;;; integer*4 function pivot_i4(a,i,j) / pivot_r4(a,i,j)
;;; 4-byte elements; one shared body, since the unsigned bitwise
;;; compare serves both integer*4 and non-negative real*4 data.
pivot_i4_asm__:	
pivot_r4_asm__:
sort 4
