Fix potential bug in the spinlock inline asm operand constraints: `l` is
just an input, but it is `*l` that is read and written. Pass `l` through a
plain "r" input constraint and describe `*l` with a memory operand so the
compiler knows the lock value itself is accessed.

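For illustration, a minimal standalone sketch of what the corrected
constraints express, using a bare uint32_t as a stand-in lock word (the
type and function name are assumptions for this example, not the header's
actual definitions; it only compiles for an AArch64 target):

    #include <stdint.h>

    /* Hypothetical unlock mirroring the patched constraint pattern. */
    static inline void example_unlock(uint32_t *l)
    {
    	/*
    	 * "=m"(*l) declares that the pointed-to word is written, and
    	 * "r"(l) passes the address itself as an ordinary input. The
    	 * old "+r"(l) instead marked the pointer as modified and never
    	 * told the compiler about the store to *l.
    	 */
    	__asm__ volatile("stlr wzr, [%1]" : "=m"(*l) : "r"(l) : "cc");
    }
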
Change-Id: I96aa4ef86325347fdf0749ada5eef271ddcb5362
diff --git a/src/arch/aarch64/inc/hf/arch/spinlock.h b/src/arch/aarch64/inc/hf/arch/spinlock.h
index 3e3d1a0..2b39a1b 100644
--- a/src/arch/aarch64/inc/hf/arch/spinlock.h
+++ b/src/arch/aarch64/inc/hf/arch/spinlock.h
@@ -52,12 +52,12 @@
 		"	mov	%w2, #1\n"
 		"	sevl\n" /* set event bit */
 		"1:	wfe\n"  /* wait for event, clear event bit */
-		"2:	ldaxr	%w1, [%0]\n"      /* load lock value */
+		"2:	ldaxr	%w1, [%3]\n"      /* load lock value */
 		"	cbnz	%w1, 1b\n"	/* if lock taken, goto WFE */
-		"	stxr	%w1, %w2, [%0]\n" /* try to take lock */
+		"	stxr	%w1, %w2, [%3]\n" /* try to take lock */
 		"	cbnz	%w1, 2b\n"	/* loop if unsuccessful */
-		: "+r"(l), "=&r"(tmp1), "=&r"(tmp2)
-		:
+		: "+m"(*l), "=&r"(tmp1), "=&r"(tmp2)
+		: "r"(l)
 		: "cc");
 }
 
@@ -67,5 +67,5 @@
 	 * Store zero to lock's value with release semantics. This triggers an
 	 * event which wakes up other threads waiting on a lock (no SEV needed).
 	 */
-	__asm__ volatile("stlr wzr, [%0]" : "+r"(l)::"cc");
+	__asm__ volatile("stlr wzr, [%1]" : "=m"(*l) : "r"(l) : "cc");
 }
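
As a companion sketch, the corrected acquire loop with the same assumed
bare uint32_t lock word (again illustrative, not the header's actual
code; it mirrors the constraints added by the hunk above):

    #include <stdint.h>

    static inline void example_lock(uint32_t *l)
    {
    	uint32_t tmp1;
    	uint32_t tmp2;

    	/*
    	 * %0 = "+m"(*l): the lock word in memory, read and written.
    	 * %1, %2: scratch registers. %3 = "r"(l): the lock's address.
    	 */
    	__asm__ volatile(
    		"	mov	%w2, #1\n"
    		"	sevl\n"			  /* set event bit */
    		"1:	wfe\n"			  /* wait for event */
    		"2:	ldaxr	%w1, [%3]\n"	  /* load-acquire lock value */
    		"	cbnz	%w1, 1b\n"	  /* lock taken: back to wfe */
    		"	stxr	%w1, %w2, [%3]\n" /* try to take lock */
    		"	cbnz	%w1, 2b\n"	  /* store failed: reload */
    		: "+m"(*l), "=&r"(tmp1), "=&r"(tmp2)
    		: "r"(l)
    		: "cc");
    }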