--- contrib/libnum/bignum/s/pentiumKerN.s--	Sat Nov 29 01:55:57 1997
+++ contrib/libnum/bignum/s/pentiumKerN.s	Mon Jul 27 13:17:50 1998
@@ -6,149 +6,149 @@
 		.data
 		.type	copyright,@object
 		.size	copyright,72
-copyright:	.string	"@(#)KerN.c: copyright Digital Equipment Corporation & INRIA 1988, 1989\n"
+copyright:	.asciz	"@(#)KerN.c: copyright Digital Equipment Corporation & INRIA 1988, 1989\n"
 
 		.text
-		.align	16
-		.globl	BnnSetToZero
-BnnSetToZero:	movl	4(%esp),%edx		# nn
+		.align	4
+		.globl	_BnnSetToZero
+_BnnSetToZero:	movl	4(%esp),%edx		# nn
 		movl	8(%esp),%eax		# nl
 		testl	%eax,%eax
-		je	BSTZ2			# if(nl==0) return
-		.align		16
-BSTZ1:		movl	$0,(%edx)		# *nn = 0
+		je	_BSTZ2			# if(nl==0) return
+		.align		4
+_BSTZ1:	movl	$0,(%edx)		# *nn = 0
 		decl	%eax			# nl--
 		leal	4(%edx),%edx		# nn += 1
-		jne	BSTZ1			# if(nl) BSTZ1
-BSTZ2:		ret
+		jne	_BSTZ1			# if(nl) _BSTZ1
+_BSTZ2:		ret
 
-		.align	16
-		.globl	BnnAssign
-BnnAssign:	pushl	%ebx
+		.align	4
+		.globl	_BnnAssign
+_BnnAssign:	pushl	%ebx
 		movl	8(%esp),%edx		# mm
 		movl	12(%esp),%ebx		# nn
 		movl	16(%esp),%eax		# nl
 		testl	%eax,%eax		# if(nl<=0) return
-		je	BAG4
+		je	_BAG4
 		sall	$2,%eax
 		leal	(%eax,%ebx),%ecx	# nnlim=nn+nl
 		cmpl	%ebx,%edx
-		jb	BAG1			# if(mm<nn) BAG1
+		jb	_BAG1			# if(mm<nn) _BAG1
 		cmpl	%ecx,%edx
-		jbe	BAG2			# if(mm <= nnlim) BAG2
-		.align		16
-BAG1:		movl	(%ebx),%eax		# A = *nn
+		jbe	_BAG2			# if(mm <= nnlim) _BAG2
+		.align		4
+_BAG1:		movl	(%ebx),%eax		# A = *nn
 		leal	4(%ebx),%ebx		# nn += 1
 		movl	%eax,(%edx)		# *mm = A
 		leal	4(%edx),%edx		# mm += 1
-		cmpl	%ecx,%ebx		# if(nn < nnlim) BAG1
-		jb	BAG1
+		cmpl	%ecx,%ebx		# if(nn < nnlim) _BAG1
+		jb	_BAG1
 		popl	%ebx			# return
 		ret
-BAG2:		cmpl	%ebx,%edx
-		jbe	BAG4			# if(mm <= nn) return
+_BAG2:		cmpl	%ebx,%edx
+		jbe	_BAG4			# if(mm <= nn) return
 		addl	%eax,%edx		# mm += nl
-		.align		16
-BAG3:		addl	$-4,%edx		# mm--
+		.align		4
+_BAG3:		addl	$-4,%edx		# mm--
 		addl	$-4,%ecx		# nnlim--
 		movl	(%ecx),%eax		# A = *nnlim
 		movl	%eax,(%edx)		# *mm = A
-		cmpl	%ecx,%ebx		# if(nn<nnlim) BAG3
-		jb	BAG3
-BAG4:		popl	%ebx
+		cmpl	%ecx,%ebx		# if(nn<nnlim) _BAG3
+		jb	_BAG3
+_BAG4:		popl	%ebx
 		ret
 
-		.align	16
-		.globl	BnnSetDigit
-BnnSetDigit:	movl	4(%esp),%edx		# nn
+		.align	4
+		.globl	_BnnSetDigit
+_BnnSetDigit:	movl	4(%esp),%edx		# nn
 		movl	8(%esp),%eax		# d
 		movl	%eax,(%edx)		# *nn = d
 		ret
 
-		.align	16
-		.globl	BnnGetDigit
-BnnGetDigit:	movl	4(%esp),%eax		# nn
+		.align	4
+		.globl	_BnnGetDigit
+_BnnGetDigit:	movl	4(%esp),%eax		# nn
 		movl	(%eax),%eax		# return(*nn)
 		ret
 
-		.align	16
-		.globl	BnnNumDigits
-BnnNumDigits:	movl	8(%esp),%eax		# nl
+		.align	4
+		.globl	_BnnNumDigits
+_BnnNumDigits:	movl	8(%esp),%eax		# nl
 		leal	0(,%eax,4),%edx
 		addl	4(%esp),%edx		# nn += nl
-		jmp	BND2
-		.align		16
-BND1:		decl	%eax			# nl--
-BND2:		testl	%eax,%eax		# if(nl == 0) BND3
-		je	BND3
+		jmp	_BND2
+		.align		4
+_BND1:		decl	%eax			# nl--
+_BND2:		testl	%eax,%eax		# if(nl == 0) _BND3
+		je	_BND3
 		addl	$-4,%edx		# nn--
-		cmpl	$0,(%edx)		# if(nn-- != 0) BND1
-		je	BND1
-BND3:		testl	%eax,%eax		# if(nl != 0) return(nl)
-		jne	BND4
+		cmpl	$0,(%edx)		# if(*nn == 0) _BND1
+		je	_BND1
+_BND3:		testl	%eax,%eax		# if(nl != 0) return(nl)
+		jne	_BND4
 		movl	$1,%eax			# return(1)
-BND4:		ret
+_BND4:		ret
 
-		.align	16
-		.globl	BnnNumLeadingZeroBitsInDigit
-BnnNumLeadingZeroBitsInDigit:
+		.align	4
+		.globl	_BnnNumLeadingZeroBitsInDigit
+_BnnNumLeadingZeroBitsInDigit:
 		movl	4(%esp),%edx		# d
 		xorl	%eax,%eax		# p = 0
-		testl	%edx,%edx		# if(d) BNLZ1
-		jne	BNLZ1
+		testl	%edx,%edx		# if(d) _BNLZ1
+		jne	_BNLZ1
 		movl	$32,%eax		# return(32)
 		ret
-BNLZ1:		testl	$-65536,%edx		# if(d & 0xFFFF0000) BNLZ2
-		jne	BNLZ2
+_BNLZ1:		testl	$-65536,%edx		# if(d & 0xFFFF0000) _BNLZ2
+		jne	_BNLZ2
 		movl	$16,%eax		# p = 16
 		sall	$16,%edx		# d <<= 16
-BNLZ2:		testl	$-16777216,%edx		# if(d & 0xFF000000) BNLZ3
-		jne	BNLZ3
+_BNLZ2:		testl	$-16777216,%edx		# if(d & 0xFF000000) _BNLZ3
+		jne	_BNLZ3
 		addl	$8,%eax			# p += 8
 		sall	$8,%edx			# d <<= 8
-BNLZ3:		testl	$-268435456,%edx	# if(d & 0xF0000000) BNLZ4
-		jne	BNLZ4
+_BNLZ3:		testl	$-268435456,%edx	# if(d & 0xF0000000) _BNLZ4
+		jne	_BNLZ4
 		addl	$4,%eax			# p += 4
 		sall	$4,%edx			# d <<= 4
-BNLZ4:		testl	$-1073741824,%edx	# if(d & 0xC0000000) BNLZ5
-		jne	BNLZ5
+_BNLZ4:		testl	$-1073741824,%edx	# if(d & 0xC0000000) _BNLZ5
+		jne	_BNLZ5
 		addl	$2,%eax			# p += 2
 		sall	$2,%edx			# d <<= 2
-BNLZ5:		testl	%edx,%edx		# if(d) BNLZ6
-		jl	BNLZ6
+_BNLZ5:		testl	%edx,%edx		# if(d & 0x80000000) _BNLZ6
+		jl	_BNLZ6
 		incl	%eax			# p += 1
-BNLZ6:		ret
+_BNLZ6:		ret
 
-		.align	16
-		.globl	BnnDoesDigitFitInWord
-BnnDoesDigitFitInWord:
+		.align	4
+		.globl	_BnnDoesDigitFitInWord
+_BnnDoesDigitFitInWord:
 		movl	$1,%eax
 		ret
 
-		.align	16
-		.globl	BnnIsDigitZero
-BnnIsDigitZero:	cmpl	$0,4(%esp)
+		.align	4
+		.globl	_BnnIsDigitZero
+_BnnIsDigitZero:	cmpl	$0,4(%esp)
 		sete	%al
 		andl	$255,%eax
 		ret
 
-		.align	16
-		.globl BnnIsDigitNormalized
-BnnIsDigitNormalized:
+		.align	4
+		.globl _BnnIsDigitNormalized
+_BnnIsDigitNormalized:
 		movl	4(%esp),%eax
 		shrl	$31,%eax
 		ret
 
-		.align	16
-		.globl	BnnIsDigitOdd
-BnnIsDigitOdd:	xorl	%eax,%eax
+		.align	4
+		.globl	_BnnIsDigitOdd
+_BnnIsDigitOdd:	xorl	%eax,%eax
 		testb	$1,4(%esp)
 		setnz	%al
 		ret
 
-		.align	16
-		.globl	BnnCompareDigits
-BnnCompareDigits:
+		.align	4
+		.globl	_BnnCompareDigits
+_BnnCompareDigits:
 		movl	4(%esp),%ecx		# d1
 		movl	8(%esp),%edx		# d2
 		xorl	%eax,%eax
@@ -157,7 +157,7 @@
 		je	.LBCD2
 		movl	$-1,%eax
 		ret
-		.align	16
+		.align	4
 .LBCD1:		movl	$1,%eax
 .LBCD2:		ret
 
@@ -178,45 +178,45 @@
 #		orl	%ecx,%eax		# A = (CF-!ZF)|!ZF
 #		ret
 
-		.align	16
-		.globl	BnnComplement
-BnnComplement:	movl	4(%esp),%edx		# nn
+		.align	4
+		.globl	_BnnComplement
+_BnnComplement:	movl	4(%esp),%edx		# nn
 		movl	8(%esp),%eax		# nl
 		testl	%eax,%eax		# if(nl==0) return
-		je	BCOMP2
+		je	_BCOMP2
 		leal	(%edx,%eax,4),%ecx	# nnlim = nn+nl
-		.align		16
-BCOMP1:		notl	(%edx)			# *nn = !*nn
+		.align		4
+_BCOMP1:		notl	(%edx)			# *nn = !*nn
 		addl	$4,%edx			# nn++
-		cmpl	%ecx,%edx		# if(nn<nnlim) BCOMP1
-		jb	BCOMP1
-BCOMP2:		ret
-
-		.align	16
-		.globl	BnnAndDigits
-BnnAndDigits:
+		cmpl	%ecx,%edx		# if(nn<nnlim) _BCOMP1
+		jb	_BCOMP1
+_BCOMP2:		ret
+
+		.align	4
+		.globl	_BnnAndDigits
+_BnnAndDigits:
 		movl	4(%esp),%eax
 		movl	8(%esp),%edx
 		andl	%edx,(%eax)
 		ret
 
-		.align	16
-		.globl	BnnOrDigits
-BnnOrDigits:	movl	4(%esp),%eax
+		.align	4
+		.globl	_BnnOrDigits
+_BnnOrDigits:	movl	4(%esp),%eax
 		movl	8(%esp),%edx
 		orl	%edx,(%eax)
 		ret
 
-		.align	16
-		.globl	BnnXorDigits
-BnnXorDigits:	movl	4(%esp),%eax
+		.align	4
+		.globl	_BnnXorDigits
+_BnnXorDigits:	movl	4(%esp),%eax
 		movl	8(%esp),%edx
 		xorl	%edx,(%eax)
 		ret
 
-		.align	16
-		.globl	BnnShiftLeft
-BnnShiftLeft:	pushl	%ebp
+		.align	4
+		.globl	_BnnShiftLeft
+_BnnShiftLeft:	pushl	%ebp
 		pushl	%edi
 		pushl	%esi
 		pushl	%ebx
@@ -225,15 +225,15 @@
 		movl	28(%esp),%ecx		# nbi
 		xorl	%eax,%eax		# res = 0
 		testl	%ecx,%ecx		# if(nbi == 0) return(res)
-		je	BSL2
+		je	_BSL2
 		testl	%ebx,%ebx		# if(ml == 0) return(res)
-		je	BSL2
+		je	_BSL2
 		movl	$32,%edx		# rnbi = 32
 		subl	%ecx,%edx		# rnbi -= nbi
 		bswap	%edx			# Same as rnbi << 24..
 		orl	%edx,%ecx		# C = rnbi .. nbi
-		.align		16
-BSL1:
+		.align		4
+_BSL1:
 		movl	(%ebp),%esi		# save = *mm
 		movl	(%ebp),%edi		# X = save
 		sall	%cl,%edi		# X << nbi
@@ -245,16 +245,16 @@
 		shrl	%cl,%eax		# res >>= rnbi
 		bswap	%ecx
 		decl	%ebx			# ml--
-		jne	BSL1			# if(ml) BSL1
-BSL2:		popl	%ebx
+		jne	_BSL1			# if(ml) _BSL1
+_BSL2:		popl	%ebx
 		popl	%esi
 		popl	%edi
 		popl	%ebp
 		ret
 
-		.align	16
-		.globl	BnnShiftRight
-BnnShiftRight:	pushl	%ebp
+		.align	4
+		.globl	_BnnShiftRight
+_BnnShiftRight:	pushl	%ebp
 		pushl	%edi
 		pushl	%esi
 		pushl	%ebx
@@ -263,16 +263,16 @@
 		movl	28(%esp),%ecx		# nbi
 		xorl	%eax,%eax		# res = 0
 		testl	%ecx,%ecx		# if(nbi == 0) return(res)
-		je	BSR2
+		je	_BSR2
 		testl	%ebx,%ebx		# if(ml == 0) return(res)
-		je	BSR2
+		je	_BSR2
 		leal	(%ebp,%ebx,4),%ebp	# mm += ml
 		movl	$32,%edx		# rnbi = 32
 		subl	%ecx,%edx		# rnbi -= nbi
 		bswap	%edx			# Same as rnbi << 24..
 		orl	%edx,%ecx		# C = rnbi .. nbi
-		.align		16
-BSR1:
+		.align		4
+_BSR1:
 		addl	$-4,%ebp		# mm--
 		movl	(%ebp),%esi		# save = *mm
 		movl	(%ebp),%edi		# X = save
@@ -284,37 +284,37 @@
 		sall	%cl,%eax		# res <<= rnbi
 		bswap	%ecx
 		decl	%ebx			# ml--
-		jne	BSR1			# if(ml) BSR1
-BSR2:		popl	%ebx
+		jne	_BSR1			# if(ml) _BSR1
+_BSR2:		popl	%ebx
 		popl	%esi
 		popl	%edi
 		popl	%ebp
 		ret
 
-		.align	16
-		.globl	BnnAddCarry
-BnnAddCarry:	movl	4(%esp),%edx		# nn
+		.align	4
+		.globl	_BnnAddCarry
+_BnnAddCarry:	movl	4(%esp),%edx		# nn
 		movl	8(%esp),%ecx		# nl
 		cmpl	$0,12(%esp)		# if(carryin==0) return(0);
-		je	BAC4
-BAC1:		testl	%ecx,%ecx		# if(nl==0) return(1);
-		je	BAC3
-		.align		16
-BAC2:		movl	(%edx),%eax		# X = *nn
+		je	_BAC4
+_BAC1:		testl	%ecx,%ecx		# if(nl==0) return(1);
+		je	_BAC3
+		.align		4
+_BAC2:		movl	(%edx),%eax		# X = *nn
 		addl	$1,%eax			# X++
 		movl	%eax,(%edx)		# *nn = X
-		jnc	BAC4			# if !CF return(0);
+		jnc	_BAC4			# if !CF return(0);
 		leal	4(%edx),%edx		# nn += 1;
 		decl	%ecx			# nl--
-		jnz	BAC2			# if(nl!=0) BAC2
-BAC3:		movl	$1,%eax			# return(1);
+		jnz	_BAC2			# if(nl!=0) _BAC2
+_BAC3:		movl	$1,%eax			# return(1);
 		ret
-BAC4:		xorl	%eax,%eax		# return(0);
+_BAC4:		xorl	%eax,%eax		# return(0);
 		ret
 
-		.align	16
-		.globl	BnnAdd
-BnnAdd:		pushl	%edi
+		.align	4
+		.globl	_BnnAdd
+_BnnAdd:		pushl	%edi
 		pushl	%esi
 		pushl	%ebx
 		movl	16(%esp),%edx		# mm
@@ -323,11 +323,11 @@
 		movl	28(%esp),%esi		# nl
 		movl	32(%esp),%eax		# c
 		subl	%esi,%ecx		# ml -= nl
-		testl	%esi,%esi		# if(nl == 0) BADD2
-		je	BADD2
+		testl	%esi,%esi		# if(nl == 0) _BADD2
+		je	_BADD2
 		neg	%eax			# CF = c
-		.align		16
-BADD1:
+		.align		4
+_BADD1:
 		movl	(%ebx),%eax		# c = *nn
 		movl	(%edx),%edi		# X = *mm
 		adc	%eax,%edi		# X += c + CF
@@ -335,53 +335,53 @@
 		decl	%esi			# nl--
 		leal	4(%ebx),%ebx		# nn += 1;
 		leal	4(%edx),%edx		# mm += 1;
-		jne	BADD1			# if(nl != 0) BADD1
+		jne	_BADD1			# if(nl != 0) _BADD1
 		setc	%al			# c = CF
 		andl	$255,%eax
-BADD2:		testl	%eax,%eax		# if(c == 0) return(0);
-		je	BADD5
+_BADD2:		testl	%eax,%eax		# if(c == 0) return(0);
+		je	_BADD5
 		testl	%ecx,%ecx		# if(ml==0) return(1);
-		je	BADD4
-		.align		16
-BADD3:		incl	(%edx)			# (*mm)++
-		jnz	BADD5			# if !ZF return(0);
+		je	_BADD4
+		.align		4
+_BADD3:		incl	(%edx)			# (*mm)++
+		jnz	_BADD5			# if !ZF return(0);
 		addl	$4,%edx			# mm++
 		decl	%ecx			# ml--
-		jnz	BADD3			# if(ml!=0) BADD3
-BADD4:		movl	$1,%eax			# return(1);
+		jnz	_BADD3			# if(ml!=0) _BADD3
+_BADD4:		movl	$1,%eax			# return(1);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
 		ret
-BADD5:		xorl	%eax,%eax		# return(0);
+_BADD5:		xorl	%eax,%eax		# return(0);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
 		ret
 
-		.align	16
-		.globl	BnnSubtractBorrow
-BnnSubtractBorrow:
+		.align	4
+		.globl	_BnnSubtractBorrow
+_BnnSubtractBorrow:
 		movl	4(%esp),%edx		# nn
 		movl	8(%esp),%ecx		# nl
 		cmpl	$0,12(%esp)		# if(carryin==1) return(1);
-		jne	BSB4
-BSB1:		testl	%ecx,%ecx		# if(nl==0) return(0);
-		je	BSB3
-		.align		16
-BSB2:		subl	$1,(%edx)		# (*nn)--
-		jnc	BSB4			# if !CF return(1);
+		jne	_BSB4
+_BSB1:		testl	%ecx,%ecx		# if(nl==0) return(0);
+		je	_BSB3
+		.align		4
+_BSB2:		subl	$1,(%edx)		# (*nn)--
+		jnc	_BSB4			# if !CF return(1);
 		addl	$4,%edx			# nn++
 		decl	%ecx			# nl--
-		jnz	BSB2			# if(nl!=0) BSB2
-BSB3:		xorl	%eax,%eax		# return(0);
+		jnz	_BSB2			# if(nl!=0) _BSB2
+_BSB3:		xorl	%eax,%eax		# return(0);
 		ret
-BSB4:		movl	$1,%eax			# return(1);
+_BSB4:		movl	$1,%eax			# return(1);
 		ret
 
-		.align	16
-		.globl	BnnSubtract
-BnnSubtract:	pushl	%edi
+		.align	4
+		.globl	_BnnSubtract
+_BnnSubtract:	pushl	%edi
 		pushl	%esi
 		pushl	%ebx
 		movl	16(%esp),%edx		# mm
@@ -390,56 +390,56 @@
 		movl	28(%esp),%esi		# nl
 		movl	32(%esp),%eax		# c
 		subl	%esi,%ecx		# ml -= nl
-		testl	%esi,%esi		# if(nl) BS2
-		je	BS2
+		testl	%esi,%esi		# if(nl == 0) _BS2
+		je	_BS2
 		xorl	$1,%eax			# c = !c;
 		neg	%eax			# CF = c
-		.align		16
-BS1:		movl	(%edx),%edi		# X = *mm
+		.align		4
+_BS1:		movl	(%edx),%edi		# X = *mm
 		movl	(%ebx),%eax		# c = *nn
 		sbb	%eax,%edi		# X -= c + CF
 		movl	%edi,(%edx)		# *mm = X
 		leal	4(%ebx),%ebx		# nn += 1;
 		leal	4(%edx),%edx		# mm += 1;
 		decl	%esi			# nl--
-		jne	BS1			# if(nl != 0) BS1
+		jne	_BS1			# if(nl != 0) _BS1
 		setc	%al			# c = CF
 		andl	$255,%eax
 		xorl	$1,%eax			# c = !c;
-BS2:		testl	%eax,%eax		# if(c == 1) return(1);
-		jne	BS5
+_BS2:		testl	%eax,%eax		# if(c == 1) return(1);
+		jne	_BS5
 		testl	%ecx,%ecx		# if(ml==0) return(0);
-		je	BS4
-		.align		16
-BS3:		subl	$1,(%edx)		# (*mm)--
-		jnc	BS5			# if !CF return(1);
+		je	_BS4
+		.align		4
+_BS3:		subl	$1,(%edx)		# (*mm)--
+		jnc	_BS5			# if !CF return(1);
 		addl	$4,%edx			# mm++
 		decl	%ecx			# ml--
-		jnz	BS3			# if(ml!=0) BS3
-BS4:		xorl	%eax,%eax		# return(0);
+		jnz	_BS3			# if(ml!=0) _BS3
+_BS4:		xorl	%eax,%eax		# return(0);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
 		ret
-BS5:		movl	$1,%eax			# return(1);
+_BS5:		movl	$1,%eax			# return(1);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
 		ret
 
-		.align	16
-		.globl	BnnMultiplyDigit
-BnnMultiplyDigit:
+		.align	4
+		.globl	_BnnMultiplyDigit
+_BnnMultiplyDigit:
 		movl	20(%esp),%ecx		# d
-		testl	%ecx,%ecx		# if(d!=0) BM1
-		jne	BMD1			
+		testl	%ecx,%ecx		# if(d!=0) _BMD1
+		jne	_BMD1			
 		xorl	%eax,%eax		# return(0);
 		ret
-BMD1:		cmpl	$1,%ecx			# if(d!=1) BM2
-		jne	BMD2
+_BMD1:		cmpl	$1,%ecx			# if(d!=1) _BMD2
+		jne	_BMD2
 		movl	$0,20(%esp)
-		jmp	BnnAdd			# return(BnnAdd(pp,pl,mm,ml,0)
-BMD2:		pushl	%ebp
+		jmp	_BnnAdd			# return(_BnnAdd(pp,pl,mm,ml,0))
+_BMD2:		pushl	%ebp
 		pushl	%edi
 		pushl	%esi
 		pushl	%ebx
@@ -449,9 +449,9 @@
 		subl	%ebp,24(%esp)		# pl -= ml
 		xorl	%ebx,%ebx		# low = 0
 		testl	%ebp,%ebp
-		je	BMD7			# if(ml == 0) return(0);
-		.align		16
-BMD3:		movl	(%esi),%eax		# XL = *mm
+		je	_BMD7			# if(ml == 0) return(0);
+		.align		4
+_BMD3:		movl	(%esi),%eax		# XL = *mm
 		addl	$4,%esi			# mm++
 		mul	%ecx			# XH:XL = D*XL
 		addl	%ebx,%eax		# XL += low
@@ -461,36 +461,36 @@
 		movl	%eax,(%edi)		# *pp = XL
 		addl	$4,%edi			# pp++
 		movl	%edx,%ebx		# low = XH
-BMD4:		decl	%ebp			# ml--
-		jne	BMD3			# if(ml) BMD3
+_BMD4:		decl	%ebp			# ml--
+		jne	_BMD3			# if(ml) _BMD3
 		movl	24(%esp),%edx		# pl
 		addl	%ebx,(%edi)		# *pp += low
-		jnc	BMD7			# if !CF return(0)
+		jnc	_BMD7			# if !CF return(0)
 		decl	%edx			# pl--
-		je	BMD6			# if(pl == 0) return(1)
+		je	_BMD6			# if(pl == 0) return(1)
 		addl	$4,%edi			# pp++
-		.align		16
-BMD5:		addl	$1,(%edi)		# (*pp)++
-		jnc	BMD7			# if !CF return(0);
+		.align		4
+_BMD5:		addl	$1,(%edi)		# (*pp)++
+		jnc	_BMD7			# if !CF return(0);
 		addl	$4,%edi			# pp++
 		decl	%edx			# pl--
-		jnz	BMD5			# if(pl!=0) BMD5
-BMD6:		movl	$1,%eax			# return(1);
+		jnz	_BMD5			# if(pl!=0) _BMD5
+_BMD6:		movl	$1,%eax			# return(1);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
 		popl	%ebp
 		ret
-BMD7:		xorl	%eax,%eax		# return(0);
+_BMD7:		xorl	%eax,%eax		# return(0);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
 		popl	%ebp
 		ret
 
-		.align	16
-		.globl	BnnDivideDigit
-BnnDivideDigit:	pushl	%edi
+		.align	4
+		.globl	_BnnDivideDigit
+_BnnDivideDigit:	pushl	%edi
 		pushl	%esi
 		pushl	%ebx
 		movl	16(%esp),%edi		# qq
@@ -503,17 +503,17 @@
 		addl	$-4,%esi		# nn--
 		movl	(%esi),%edx		# XH = *nn;
 		testl	%ecx,%ecx
-		je	BDD2			# if(nl==0) return(XH)
-		.align		16
-BDD1:		addl	$-4,%esi		# nn--
+		je	_BDD2			# if(nl==0) return(XH)
+		.align		4
+_BDD1:		addl	$-4,%esi		# nn--
 		movl	(%esi),%eax		# XL = *nn
 		div	%ebx			# XL = XH:XL / d;
 						# XH = XH:XL % d;
 		addl	$-4,%edi		# qq--
 		movl	%eax,(%edi)		# *qq = XL;
 		decl	%ecx			# nl--
-		jnz	BDD1			# if(nl!=0) BDD1
-BDD2:		movl	%edx,%eax		# return(XH);
+		jnz	_BDD1			# if(nl!=0) _BDD1
+_BDD2:		movl	%edx,%eax		# return(XH);
 		popl	%ebx
 		popl	%esi
 		popl	%edi
