// Encodes the signed 64-bit value a2 into a3 bytes (1..8) and hands the
// encoded buffer to a method reached through a1's vtable. If the width is
// invalid, or the value fits neither unsigned nor signed in a3 bytes,
// *a4 is set to 1 and nothing is written.
signed __int64 __fastcall sub_1404E7E00(_QWORD *a1, signed __int64 a2, unsigned int a3, _BYTE *a4)
{
  __int64 v4; // r10
  unsigned __int64 v5; // r11
  _QWORD *v6; // rbx
  signed __int64 result; // rax
  char v8; // cl
  char v9; // cl
  __int64 v10; // rax
  int v11; // er8
  unsigned __int64 v12; // rdx
  char *v13; // [rsp+20h] [rbp-28h]
  __int64 v14; // [rsp+28h] [rbp-20h]
  char v15[8]; // [rsp+30h] [rbp-18h]

  v4 = a3;                                      // byte width
  v5 = a2;                                      // value to encode
  v6 = a1;                                      // object pointer
  *a4 = 0;                                      // clear the error flag
  result = a3 - 1;
  if ( (unsigned int)result > 7                 // width is 0 or > 8
    || (v8 = 8 * v4, (unsigned int)(8 * v4) < 0x40)            // width < 8 bytes
    && a2 >= (unsigned __int64)(1i64 << v8)                    // does not fit unsigned
    && ((v9 = v8 - 1, result = -1i64 << v9, -1i64 << v9 > a2)  // below signed minimum
     || a2 >= 1i64 << v9) )                                    // above signed maximum
  {
    *a4 = 1;                                    // value out of range for a3 bytes
  }
  else
  {
    v10 = 0i64;
    if ( a3 )
    {
      if ( *(_BYTE *)(*(_QWORD *)(v6[2] + 8i64) + 16i64) )     // byte-order flag
      {
        v11 = 0;                                // little-endian: low byte first
        do
        {
          v12 = v5 >> v11;
          v11 += 8;
          v15[v10] = v12;
          v10 = (unsigned int)(v10 + 1);
        }
        while ( (_DWORD)v10 != (_DWORD)v4 );
      }
      else                                      // big-endian: high byte first
      {
        do
        {
          v15[v10] = (unsigned __int64)a2 >> (8 * ((unsigned __int8)v4 - (unsigned __int8)v10) - 8);
          v10 = (unsigned int)(v10 + 1);
        }
        while ( (_DWORD)v10 != (_DWORD)v4 );
      }
    }
    v14 = v4;                                   // length
    v13 = v15;                                  // buffer pointer
    result = (*(__int64 (__fastcall **)(_QWORD *, char **))(*v6 + 336i64))(v6, &v13);  // vtable call at +0x150
  }
  return result;
}
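/*
 * A minimal readable sketch of what the routine above appears to do, under
 * the assumptions read off the decompilation: a1 is an object whose vtable
 * slot at +0x150 (336) is a "write these bytes" method, and whose a1[2]
 * field leads to a byte-order flag at *(*(a1[2] + 8) + 16). Every name below
 * (Writer, emit_bytes, is_little_endian, write_int, stub_emit) is a
 * hypothetical label for illustration, not a symbol recovered from the
 * binary, and the error return values merely mirror the stray rax contents
 * left behind in the decompiled error paths.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct Writer Writer;

struct Writer {
    /* stands in for the indirect call through *a1 + 336 */
    int64_t (*emit_bytes)(Writer *self, const uint8_t *buf, int64_t len);
    /* stands in for the flag at *(*(a1[2] + 8) + 16) */
    int is_little_endian;
};

/* Encode `value` into `width` bytes (1..8). Sets *overflow to 1 when the
 * width is invalid or the value fits neither as an unsigned nor as a signed
 * width-byte integer; otherwise forwards the encoded bytes to emit_bytes. */
static int64_t write_int(Writer *w, int64_t value, uint32_t width, uint8_t *overflow)
{
    uint8_t buf[8];

    *overflow = 0;
    if (width - 1 > 7) {                        /* width == 0 or width > 8 */
        *overflow = 1;
        return (int64_t)(width - 1);            /* incidental, as in rax */
    }
    if (width < 8) {                            /* 8-byte values always fit */
        uint64_t ulimit = 1ull << (8 * width);                 /* unsigned max + 1 */
        int64_t  smin   = -(int64_t)(1ull << (8 * width - 1));
        int64_t  slimit =  (int64_t)(1ull << (8 * width - 1)); /* signed max + 1 */
        if ((uint64_t)value >= ulimit && (value < smin || value >= slimit)) {
            *overflow = 1;
            return smin;                        /* incidental, as in rax */
        }
    }

    if (w->is_little_endian) {
        for (uint32_t i = 0; i < width; i++)    /* low byte first */
            buf[i] = (uint8_t)((uint64_t)value >> (8 * i));
    } else {
        for (uint32_t i = 0; i < width; i++)    /* high byte first */
            buf[i] = (uint8_t)((uint64_t)value >> (8 * (width - 1 - i)));
    }
    return w->emit_bytes(w, buf, (int64_t)width);
}

/* Usage: a stub sink that just dumps the bytes it receives. */
static int64_t stub_emit(Writer *w, const uint8_t *buf, int64_t len)
{
    (void)w;
    for (int64_t i = 0; i < len; i++)
        printf("%02x ", (unsigned)buf[i]);
    putchar('\n');
    return len;
}

int main(void)
{
    Writer w = { stub_emit, 1 };                /* little-endian sink */
    uint8_t ovf;

    write_int(&w, -2, 2, &ovf);                 /* prints "fe ff", ovf == 0 */
    write_int(&w, 70000, 2, &ovf);              /* no output, ovf == 1 */
    printf("ovf = %u\n", ovf);
    return 0;
}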