So, I'm trying to come up with a fast 3D cross product using NEON instructions, and this is currently what I have:
		Code:
	
#include <arm_neon.h>

// cross3(v1, v2) = [ y1*z2 - y2*z1 , z1*x2 - z2*x1 , x1*y2 - x2*y1 ]
float32x4_t cross3(float32x4_t v1, float32x4_t v2) {
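    // sign mask for the final fix-up: flips the sign bit of lane 1 (the y term)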
    static uint32x4_t const cross3_mask = { 0x00000000, 0x80000000, 0x00000000, 0x00000000 };
    float32x4_t result, temp1, temp2;
    
    // swizzle v1 and v2 into correct terms
    float32x2_t xy1 = vget_low_f32(v1);
    float32x2_t zz1 = vget_high_f32(v1);
    float32x2_t xy2 = vget_low_f32(v2);
    float32x2_t zz2 = vget_high_f32(v2);
    float32x2_t yx1 = vrev64_f32(xy1);
    float32x2_t yx2 = vrev64_f32(xy2);
    zz1 = vdup_lane_f32(zz1, 0);
    zz2 = vdup_lane_f32(zz2, 0);
    
    // result = v1.yxx_ * v2.zzy_
    temp1 = vcombine_f32(yx1, xy1);
    temp2 = vcombine_f32(zz2, yx2);
    result = vmulq_f32(temp1, temp2); 
    
    // result = result - (v1.zzy_ * v2.yxx_)
    temp1 = vcombine_f32(zz1, yx1);
    temp2 = vcombine_f32(yx2, xy2);
    result = vmlsq_f32(result, temp1, temp2); 
    
    // result = result * [ 1, -1, 1 ]
    result = vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(result), cross3_mask));
    return result;
}

And here's the generated assembly output:
		Code:
	
	__Z6cross319__simd128_float32_t19__simd128_float32_t:
	@ args = 16, pretend = 0, frame = 0
	@ frame_needed = 0, uses_anonymous_args = 0
	@ link register save eliminated.
	vldmia	sp, {d6-d7}
	vmov	d26, r0, r1  @ v4sf
	vmov	d27, r2, r3
	vmov	d20, d6
	vmov	d2, d26
	vmov	d3, d7
	vrev64.32	d5, d2
	vrev64.32	d1, d20
	vmov	d17, d27
	vdup.32	d6, d3[0]
	ldr	r3, L3
	vmov	d24, d6
	vmov	d25, d1
	vdup.32	d4, d17[0]
	vmov	d22, d5
	vmov	d23, d2
	vmov	d18, d1
	vmov	d19, d20
	vmul.f32	q1, q11, q12
	vmov	d16, d4
	vmov	d17, d5
	vldmia	r3, {d0-d1}
	vmls.f32	q1, q8, q9
	veor	q3, q1, q0
	vmov	r0, r1, d6  @ v4sf
	vmov	r2, r3, d7
	@ lr needed for prologue
	bx	lr

Looks like I'll have to rewrite it using inline assembly to get rid of the redundant vmov instructions. But other than that, I'm wondering whether the general algorithm can be improved at all. There's some ARM marketing material floating around claiming that a 3-term cross product can be done in 3 cycles. I don't see how that's possible with 2x vrev64 + 2x vdup + vmul + vldmia + vmls + veor, unless the data is already pre-formatted so that a vmul + vmls (+ veor/vmul) is all that's needed. Am I missing something obvious here that could make this faster?
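For what it's worth, here's a minimal sketch (my own naming, not from any ARM material) of what that pre-formatted case might look like: if the caller keeps each operand around in both yzx and zxy lane order, the cross product itself collapses to one vmul plus one vmls, with no vrev64/vdup shuffling and no veor sign fix-up:

Code:

#include <arm_neon.h>

// Hedged sketch: assumes the caller pre-swizzles each vector into two
// layouts, a_yzx = [y1, z1, x1, _] and a_zxy = [z1, x1, y1, _] (same for b).
// cross(a, b) = a.yzx * b.zxy - a.zxy * b.yzx, so the signs come out right
// without a mask; lane 3 is garbage.
float32x4_t cross3_preswizzled(float32x4_t a_yzx, float32x4_t a_zxy,
                               float32x4_t b_yzx, float32x4_t b_zxy) {
    float32x4_t r = vmulq_f32(a_yzx, b_zxy); // [ y1*z2, z1*x2, x1*y2, _ ]
    return vmlsq_f32(r, a_zxy, b_yzx);       // [ y1*z2-z1*y2, z1*x2-x1*z2, x1*y2-y1*x2, _ ]
}

The shuffles don't vanish, of course; they just move to wherever the vectors are produced, so this only pays off if the data can live in that layout across many operations.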