1. Overview

2. Fake SELFs

2.1. Kernel code

2.2. Testing Fake PKGs

3.1. Toolchain

3.2. Modding of Shellcore

3.3. Kernel code

4. Defines, structures and helper functions

5. Package repacking issues

5.1. SFO file parsing

5.2. PlayGo chunk builder

5.3. GP4 project generation

5.4. Some of PKG/PFS stuff

6. Conclusion

Executable files, including SELFs/SPRXs/SEXEs/SDLLs, are signed and encrypted. While their fake versions are neither signed nor encrypted, we usually can't use them on a retail console due to restrictions put in place by the auth manager that loads them (a secure module, or SM, running on SAMU, a secure co-processor embedded into our AMD APU). The (S)ELF loader in the kernel uses SAMU calls for everything crypto-related. Applications, including games (except system ones), require everything to be bundled into PKG files. The console uses this system heavily, so we can't just drop our application into the file system and hope that everything will work normally. So we must use packages too if we don't want to give ourselves a headache. These packages are also encrypted and signed, and we can't use debug packages on a retail console. Even debug packages are encrypted and signed, unlike executable files. Because Sony uses asymmetric encryption, we don't have all the private keys needed to decrypt them properly.

sceSblAuthMgrIsLoadable2

This function basically checks whether a file may be loaded from a specific folder/partition, and also sets up the authentication information that the system uses to check access rights and capabilities. If we need some specific capabilities, we can store them inside our own file and feed them into the authentication information; for everything else we just use default authentication information.

Code:
/*
 * Reads the fake (unsigned) auth info blob that a fake-SELF builder appends
 * at the end of the header area (the last 0x100 bytes of header + meta).
 * Returns 0 on success, -37 when the embedded blob has an unexpected size,
 * and -35 when the file is not in SELF format (values mirror the auth
 * manager's own error convention).
 */
static inline int sceSblAuthMgrGetSelfAuthInfoFake(struct self_context* ctx, struct self_auth_info* info) {
    struct self_header* hdr;
    struct self_fake_auth_info* fake_info;

    if (ctx->format == SELF_FORMAT_SELF) {
        hdr = (struct self_header*)ctx->header;
        /* the fake auth info blob lives in the last 0x100 bytes of the header area */
        fake_info = (struct self_fake_auth_info*)(ctx->header + hdr->header_size + hdr->meta_size - 0x100);
        if (fake_info->size == sizeof(fake_info->info)) {
            memcpy(info, &fake_info->info, sizeof(*info));
            return 0;
        }
        return -37;
    } else {
        return -35;
    }
}

//...

/* Returns non-zero when ctx describes a SELF whose extended-info program
   type is "fake" (i.e. an unsigned SELF); zero otherwise or on error. */
static inline int is_fake_self(struct self_context* ctx) {
    struct self_ex_info* ex_info;
    int ret;

    if (ctx && ctx->format == SELF_FORMAT_SELF) {
        ret = sceSblAuthMgrGetSelfInfo(ctx, &ex_info);
        if (ret)
            return 0;
        return ex_info->ptype == SELF_PTYPE_FAKE;
    } else {
        return 0;
    }
}

//...

/* Default self_auth_info (0x88-byte dump: paid, caps, attrs) used for
   executables when the fake SELF carries no embedded auth info. */
static const uint8_t s_auth_info_for_exec[] = {
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31,
    0x00, 0x00, 0x00, 0x00, 0x80, 0x03, 0x00, 0x20,
    0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00, 0x40,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
    0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
    0x00, 0x40, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xF0,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Default self_auth_info used for dynamic libraries (sprx/sdll). */
static const uint8_t s_auth_info_for_dynlib[] = {
    0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x40, 0x00, 0x30, 0x00, 0x30,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
    0x00, 0x40, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0xF0,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Builds the auth info for a fake SELF / plain ELF. Prefers the auth info
 * blob embedded in the file itself; if none is present, falls back to a
 * default table chosen by the ELF type and patches in the PAID from the
 * SELF's extended info. parent_auth_info is only validated, not consulted.
 * Returns 0 on success or a positive errno-style code.
 */
static int build_self_auth_info_fake(struct self_context* ctx, struct self_auth_info* parent_auth_info, struct self_auth_info* auth_info) {
    struct self_auth_info fake_auth_info;
    struct self_ex_info* ex_info;
    struct elf64_ehdr* ehdr = NULL;
    int ret;

    if (!ctx || !parent_auth_info || !auth_info) {
        ret = EINVAL;
        goto error;
    }

    /* only fake SELFs may have their auth info synthesized */
    if (!is_fake_self(ctx)) {
        ret = EINVAL;
        goto error;
    }

    ret = sceSblAuthMgrGetSelfInfo(ctx, &ex_info);
    if (ret)
        goto error;

    ret = sceSblAuthMgrGetElfHeader(ctx, &ehdr);
    if (ret)
        goto error;

    if (!ehdr) {
        ret = ESRCH;
        goto error;
    }

    /* first try the auth info embedded in the file itself */
    ret = sceSblAuthMgrGetSelfAuthInfoFake(ctx, &fake_auth_info);
    if (ret) {
        /* no embedded blob: pick a default table by ELF type */
        switch (ehdr->type) {
        case ELF_ET_EXEC:
        case ELF_ET_SCE_EXEC:
        case ELF_ET_SCE_EXEC_ASLR:
            memcpy(&fake_auth_info, s_auth_info_for_exec, sizeof(fake_auth_info));
            ret = 0;
            break;
        case ELF_ET_SCE_DYNAMIC:
            memcpy(&fake_auth_info, s_auth_info_for_dynlib, sizeof(fake_auth_info));
            ret = 0;
            break;
        default:
            ret = ENOTSUP;
            goto error;
        }
        /* keep the program authority id from the file's extended info */
        fake_auth_info.paid = ex_info->paid;
        // TODO: overwrite low bits of PAID with title id number
    }

    if (auth_info)
        memcpy(auth_info, &fake_auth_info, sizeof(*auth_info));

error:
    return ret;
}

//...
/*
 * Hook for sceSblAuthMgrIsLoadable2. Plain ELFs and fake SELFs get their
 * auth info synthesized by build_self_auth_info_fake(); every other file is
 * forwarded untouched to the original routine.
 */
static int sceSblAuthMgrIsLoadable2_hook(struct self_context* ctx, struct self_auth_info* old_auth_info, int path_id, struct self_auth_info* new_auth_info) {
    int unsigned_file = (ctx->format == SELF_FORMAT_ELF) || is_fake_self(ctx);

    if (unsigned_file)
        return build_self_auth_info_fake(ctx, old_auth_info, new_auth_info);

    return sceSblAuthMgrIsLoadable2(ctx, old_auth_info, path_id, new_auth_info);
}

sceSblAuthMgrVerifyHeader

This function parses a file's header, decrypts it (for finalized files) and sets up a context structure inside SM that is used later for loading purposes. We can't just omit the context setup because it's used by further system code, so we'll use a little trick: we let the system set up a fake context for us (this is a bit of a lie though — it's a real context that SM will prepare, but we won't use it as-is in the kernel). Thankfully, we have a few real SELFs inside static kernel memory (embedded into a memory disk image that is loaded at startup), so we don't even need to load them from the external file system. So we need to authenticate, for example, mini-syscore.elf, and then replace part of the context structure with our own stuff.

Code:
/*
 * Authenticates the header of an unsigned (plain ELF / fake SELF) file by
 * temporarily substituting the header of a real, signed SELF
 * (mini-syscore.elf) so that SM sets up a genuine loading context, then
 * restores the original header and context fields. Signed files are passed
 * straight to sceSblAuthMgrSmVerifyHeader().
 * Returns 0 on success or a positive errno-style code.
 *
 * Fix: removed a stray '*' that followed the "call the original method"
 * comment in the transcription ("*  ret = ..." would not compile).
 */
static inline int auth_self_header(struct self_context* ctx) {
    extern const uint8_t* mini_syscore_self_binary; /* TODO: you should point it to its location in a kernel's memory (copy the file using ftp or something like that and try to find it in kernel's memory dump) */

    struct self_header* hdr;
    unsigned int old_total_header_size, new_total_header_size;
    int old_format;
    uint8_t* tmp;
    int is_unsigned;
    int ret;

    is_unsigned = ctx->format == SELF_FORMAT_ELF || is_fake_self(ctx);
    if (is_unsigned) {
        old_format = ctx->format;
        old_total_header_size = ctx->total_header_size;

        /* take a header from mini-syscore.elf */
        hdr = (struct self_header*)mini_syscore_self_binary;
        new_total_header_size = hdr->header_size + hdr->meta_size;

        tmp = (uint8_t*)alloc(new_total_header_size);
        if (!tmp) {
            ret = ENOMEM;
            goto error;
        }

        /* temporarily swap our header with the header of a real SELF file */
        /* NOTE(review): assumes ctx->header is large enough to hold
           new_total_header_size bytes — confirm the buffer size upstream */
        memcpy(tmp, ctx->header, new_total_header_size);
        memcpy(ctx->header, hdr, new_total_header_size);

        /* it's now SELF, not ELF or whatever... */
        ctx->format = SELF_FORMAT_SELF;
        ctx->total_header_size = new_total_header_size;

        /* call the original method using a real SELF file */
        ret = sceSblAuthMgrSmVerifyHeader(ctx);

        /* restore everything we did before */
        memcpy(ctx->header, tmp, new_total_header_size);
        ctx->format = old_format;
        ctx->total_header_size = old_total_header_size;

        dealloc(tmp);
    } else {
        ret = sceSblAuthMgrSmVerifyHeader(ctx);
    }

error:
    return ret;
}

//...

/* Hook for sceSblAuthMgrVerifyHeader: makes sure the auth manager SM is
   running, then performs the (possibly faked) header authentication. */
static int sceSblAuthMgrVerifyHeader_hook(struct self_context* ctx) {
    sceSblAuthMgrSmStart();
    return auth_self_header(ctx);
}

sceSblAuthMgrSmLoadSelfSegment

SELF files consist of segments that are divided into blocks. Fortunately, for fake files we don't need that at all, so a code for loading them will be trivial. Notice, we're dealing with stack frames here to find a context's structure. For 4.05 you may use a different method or maybe a different offset, just look into the original kernel's method to see if you need to adjust something.

Code:
/*
 * Hook for the sceSblServiceMailbox call made inside
 * sceSblAuthMgrSmLoadSelfSegment(). For fake SELFs the SAMU round-trip is
 * skipped entirely and the mailbox response is forged as "no error";
 * signed files fall through to the original mailbox call.
 *
 * NOTE(review): the context pointer is dug out of the caller's stack frame
 * at a hard-coded offset — this is firmware/compiler specific, confirm the
 * offset against the target kernel (see the TODOs below).
 */
static int sceSblAuthMgrSmLoadSelfSegment__sceSblServiceMailbox_hook(unsigned long service_id, uint8_t* request, void* response) {
    /* getting a stack frame of a parent function */
    uint8_t* frame = (uint8_t*)__builtin_frame_address(1); /* TODO: may need to fix */

    /* finding a pointer to a context's structure */
    struct self_context* ctx = *(struct self_context**)(frame - 0x100); /* TODO: may need to fix */

    int is_unsigned = ctx && is_fake_self(ctx);
    int ret;

    if (is_unsigned) {
        /* forge a successful SM reply instead of talking to SAMU */
        *(int*)(response + 0x04) = 0; /* setting error field to zero, thus we have no errors */
        ret = 0;
    } else {
        ret = sceSblServiceMailbox(service_id, request, response);
    }

    return ret;
}

sceSblAuthMgrSmLoadSelfBlock

Here is where the actual decryption/copying happens. So, as I said above, we just need to copy the block's data from one buffer to another. However, there is a catch, we have the GPU's addresses here, so we need to convert them into CPU addresses to use them in memcpy() calls, we can do that by looking through lists of mapped memory ranges and searching for a matched GPU address. Also notice, here we're using a specific register to get a context's structure. For 4.05 you may use a different method or maybe a different register, just look into the original kernel's method to find what needs to be changed.

Code:
/*
 * Hook for the sceSblServiceMailbox call inside
 * sceSblAuthMgrSmLoadSelfBlock(). For unsigned files the "decryption" is
 * just a plain copy of the block data into the segment buffer. The request
 * carries GPU virtual addresses, which must first be translated to CPU
 * virtual addresses before memcpy() can use them.
 */
static int sceSblAuthMgrSmLoadSelfBlock__sceSblServiceMailbox_hook(unsigned long service_id, uint8_t* request, void* response) {
    /* NOTE(review): reads the context pointer straight out of r14, relying on
       the caller's register allocation — firmware-specific, confirm per FW */
    register struct self_context* ctx __asm__ ("r14"); /* TODO: may need to fix */

    /* raw fields of the SM request (offsets are firmware-specific) */
    vm_offset_t segment_data_gpu_va = *(unsigned long*)(request + 0x08);
    vm_offset_t cur_data_gpu_va = *(unsigned long*)(request + 0x50);
    vm_offset_t cur_data2_gpu_va = *(unsigned long*)(request + 0x58);
    unsigned int data_offset = *(unsigned int*)(request + 0x44);
    unsigned int data_size = *(unsigned int*)(request + 0x48);

    vm_offset_t segment_data_cpu_va, cur_data_cpu_va, cur_data2_cpu_va;
    unsigned int size1;
    int is_unsigned = ctx && (ctx->format == SELF_FORMAT_ELF || is_fake_self(ctx));
    int ret;

    if (is_unsigned) {
        /* looking into lists of GPU's mapped memory regions */
        segment_data_cpu_va = sceSblDriverGpuVaToCpuVa(segment_data_gpu_va, NULL);
        cur_data_cpu_va = sceSblDriverGpuVaToCpuVa(cur_data_gpu_va, NULL);
        cur_data2_cpu_va = cur_data2_gpu_va ? sceSblDriverGpuVaToCpuVa(cur_data2_gpu_va, NULL) : 0;

        if (segment_data_cpu_va && cur_data_cpu_va) {
            if (cur_data2_gpu_va && cur_data2_gpu_va != cur_data_gpu_va && data_offset > 0) {
                /* data spans two consecutive memory pages, so we need to copy twice */
                size1 = PAGE_SIZE - data_offset;
                memcpy((char*)segment_data_cpu_va, (char*)cur_data_cpu_va + data_offset, size1);
                memcpy((char*)segment_data_cpu_va + size1, (char*)cur_data2_cpu_va, data_size - size1);
            } else {
                memcpy((char*)segment_data_cpu_va, (char*)cur_data_cpu_va + data_offset, data_size);
            }
        }

        /* NOTE(review): the segment hook above clears response+0x04, while
           here the error field is written into the request buffer — confirm
           which buffer the caller actually inspects for this mailbox call */
        *(int*)(request + 0x04) = 0; /* setting error field to zero, thus we have no errors */
        ret = 0;
    } else {
        ret = sceSblServiceMailbox(service_id, request, response);
    }

    return ret;
}

Code:
/*
 * Installs the fake-SELF loader hooks by redirecting call sites in the
 * kernel's .text. All offsets are firmware-specific (here: 4.05) and must
 * be re-slid for other firmware versions (see TODOs).
 */
void install_unsigned_loader(void) {
    /* TODO: need to change a slide of "call sceSblAuthMgrVerifyHeader" instruction */
    INSTALL_CALL_HOOK(0x61F976, sceSblAuthMgrVerifyHeader_hook);
    INSTALL_CALL_HOOK(0x620599, sceSblAuthMgrVerifyHeader_hook);

    /* TODO: need to change a slide of "call sceSblAuthMgrIsLoadable2" instruction */
    INSTALL_CALL_HOOK(0x61F24F, sceSblAuthMgrIsLoadable2_hook);

    /* TODO: need to change a slide of "call sceSblServiceMailbox" instruction that's located inside sceSblAuthMgrSmLoadSelfBlock() */
    INSTALL_CALL_HOOK(0x6244E1, sceSblAuthMgrSmLoadSelfBlock__sceSblServiceMailbox_hook);

    /* TODO: need to change a slide of "call sceSblServiceMailbox" instruction that's located inside sceSblAuthMgrSmLoadSelfSegment() */
    INSTALL_CALL_HOOK(0x6238BA, sceSblAuthMgrSmLoadSelfSegment__sceSblServiceMailbox_hook);
}

Code: 0000h: 11 00 00 00 00 00 00 38 00 00 00 00 00 1C 00 40 0010h: 00 FF 00 00 00 00 00 A5 00 00 00 00 00 00 00 00 0020h: 00 00 00 00 00 00 00 00 00 00 00 80 00 40 00 40 0030h: 00 00 00 00 00 00 00 80 01 00 00 00 00 00 00 04 0040h: 00 40 FF FF 00 00 00 F0 XX XX XX XX XX XX XX XX 0050h: XX XX XX XX XX XX XX XX 00 00 00 00 00 00 00 00 0060h: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0070h: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0080h: 00 00 00 00 00 00 00 00

Code: 0000h: 11 00 00 00 00 00 00 38 00 00 00 00 00 1C 00 40 0010h: 00 FF 00 00 00 00 00 85 00 00 00 00 00 00 00 00 0020h: 00 00 00 00 00 00 00 00 00 00 00 80 00 40 00 40 0030h: 00 00 00 00 00 00 00 80 01 00 00 00 00 00 00 04 0040h: 00 40 FF FF 00 00 00 F0 XX XX XX XX XX XX XX XX 0050h: XX XX XX XX XX XX XX XX 00 00 00 00 00 00 00 00 0060h: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0070h: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0080h: 00 00 00 00 00 00 00 00

Code: $ make_fself.py --paid 0x3800000000000011 --auth-info '<bytes of self_auth_info structure>' eboot.elf eboot.bin loading elf file: eboot.elf saving fake signed elf file: eboot.bin processing segment #00... processing segment #01... processing segment #02... processing segment #08... done

Code: static int mount_large_fs(const char* device, const char* mountpoint, const char* fstype, const char* mode, unsigned int flags) { struct iovec* iov = NULL; int iovlen = 0; int ret; build_iovec(&iov, &iovlen, "fstype", fstype, -1); build_iovec(&iov, &iovlen, "fspath", mountpoint, -1); build_iovec(&iov, &iovlen, "from", device, -1); build_iovec(&iov, &iovlen, "large", "yes", -1); build_iovec(&iov, &iovlen, "timezone", "static", -1); build_iovec(&iov, &iovlen, "async", "", -1); build_iovec(&iov, &iovlen, "ignoreacl", "", -1); if (mode) { build_iovec(&iov, &iovlen, "dirmask", mode, -1); build_iovec(&iov, &iovlen, "mask", mode, -1); } printf(" [I] Mounting %s(%s) to %s...

", device, fstype, mountpoint); ret = nmount(iov, iovlen, flags); if (ret < 0) { printf(" [E] Failed: %d (errno: %d).", ret, errno); goto error; } else { printf(" [I] Success."); } error: return ret; } //... static int remount_partitions(void) { int ret; //... ret = mount_large_fs("/dev/da0x5.crypt", "/system_ex", "exfatfs", "511", MNT_UPDATE); if (ret) goto error; //... error: return ret; }

Public key for custom packages ( ypkg_public.pem )

Private key for custom packages (ypkg_private.pem)

Code: <mount-image> 0xc6 0xcf 0x71 0xe7 0xe5 0x9a 0xf0 0xd1 0x2a 0x2c 0x45 0x8b 0xf9 0x2a 0x0e 0xc1 0x43 0x05 0x8b 0xc3 0x71 0x17 0x80 0x1d 0xcd 0x49 0x7d 0xde 0x35 0x9d 0x25 0x9b 0xa0 0xd7 0xa0 0xf2 0x7d 0x6c 0x08 0x7e 0xaa 0x55 0x02 0x68 0x2b 0x23 0xc6 0x44 0xb8 0x44 0x18 0xeb 0x56 0xcf 0x16 0xa2 0x48 0x03 0xc9 0xe7 0x4f 0x87 0xeb 0x3d 0x30 0xc3 0x15 0x88 0xbf 0x20 0xe7 0x9d 0xff 0x77 0x0c 0xde 0x1d 0x24 0x1e 0x63 0xa9 0x4f 0x8a 0xbf 0x5b 0xbe 0x60 0x19 0x68 0x33 0x3b 0xfc 0xed 0x9f 0x47 0x4e 0x5f 0xf8 0xea 0xcb 0x3d 0x00 0xbd 0x67 0x01 0xf9 0x2c 0x6d 0xc6 0xac 0x13 0x64 0xe7 0x67 0x14 0xf3 0xdc 0x52 0x69 0x6a 0xb9 0x83 0x2c 0x42 0x30 0x13 0x1b 0xb2 0xd8 0xa5 0x02 0x0d 0x79 0xed 0x96 0xb1 0x0d 0xf8 0xcc 0x0c 0xdf 0x81 0x95 0x4f 0x03 0x58 0x09 0x57 0x0e 0x80 0x69 0x2e 0xfe 0xff 0x52 0x77 0xea 0x75 0x28 0xa8 0xfb 0xc9 0xbe 0xbf 0x9f 0xbb 0xb7 0x79 0x8e 0x18 0x05 0xe1 0x80 0xbd 0x50 0x34 0x94 0x81 0xd3 0x53 0xc2 0x69 0xa2 0xd2 0x4c 0xcf 0x6c 0xf4 0x57 0x2c 0x10 0x4a 0x3f 0xfb 0x22 0xfd 0x8b 0x97 0xe2 0xc9 0x5b 0xa6 0x2b 0xcd 0xd6 0x1b 0x6b 0xdb 0x68 0x7f 0x4b 0xc2 0xa0 0x50 0x34 0xc0 0x05 0xe5 0x8d 0xef 0x24 0x67 0xff 0x93 0x40 0xcf 0x2d 0x62 0xa2 0xa0 0x50 0xb1 0xf1 0x3a 0xa8 0x3d 0xfd 0x80 0xd1 0xf9 0xb8 0x05 0x22 0xaf 0xc8 0x35 0x45 0x90 0x58 0x8e 0xe3 0x3a 0x7c 0xbd 0x3e 0x27 </mount-image>

Code: push 100h mov esi, eax push offset aSce_sysAboutRi ; "sce_sys/about/right.sprx" push esi call ebp ; __imp_strncmp add esp, 0Ch test eax, eax jz loc_454B16

Code: push offset aAlreadyConvert ; "already converted from elf file to self"... jmp loc_45502E

Code: jmp unk_5E63FC-1918E6h jmp loc_45502E

Code: mov [esp+44Ch+Dst], 464952h mov [esp+44Ch+var_3D6], cx call ds:__imp_strncpy

Code:
/*
 * Patches the running SceShellCore process in memory: NOPs out the
 * sceKernelIsGenuineCEX / libSceDipsw checks inside the .pkg parsing
 * functions (enabling installation of debug packages) and rewrites the
 * "fake\0" string to "free\0". Offsets are firmware-specific (4.05, with
 * 4.55 equivalents in the comments).
 * Returns 0 on success or a positive errno-style code.
 *
 * Fix: the commented-out //printf diagnostics had their "\n" escapes
 * turned into literal newlines in the transcription, leaving dangling ");
 * tokens outside the comments (a syntax error); the escapes are restored.
 */
static int do_shellcore_patches(void) {
    struct proc* p = NULL;
    struct proc_vm_map_entry* entries = NULL;
    uint8_t* text_seg_base = NULL;
    size_t num_entries;
    size_t i, n;
    int ret = 0;

    /* XXX: all offsets below belong to functions that parse .pkg files */
    static uint32_t call_ofs_for__xor__eax_eax__3nop[] = {
        0x11A0DB, // call sceKernelIsGenuineCEX (0x1486BB for 4.55)
        0x66EA3B, // call sceKernelIsGenuineCEX (0x6E523B for 4.55)
        0x7F554B, // call sceKernelIsGenuineCEX (0x852C6B for 4.55)
        0x11A107, // call nidf_libSceDipsw_0xD21CE9E2F639A83C (0x1486E7 for 4.55)
        0x66EA67, // call nidf_libSceDipsw_0xD21CE9E2F639A83C (0x6E5267 for 4.55)
        0x7F5577, // call nidf_libSceDipsw_0xD21CE9E2F639A83C (0x852C97 for 4.55)
    };

    p = proc_find_by_name("SceShellCore");
    if (!p) {
        //printf("Unable to find shellcore process.\n");
        ret = ENOENT;
        goto error;
    }

    ret = proc_get_vm_map(p, &entries, &num_entries);
    if (ret) {
        //printf("proc_get_vm_map(%p) failed.\n", p);
        goto error;
    }

    /* the first R+X mapping is Shellcore's text segment */
    for (i = 0; i < num_entries; ++i) {
        if (entries[i].prot == (PROT_READ | PROT_EXEC)) {
            text_seg_base = (uint8_t*)entries[i].start;
            break;
        }
    }
    if (!text_seg_base) {
        //printf("Unable to find text segment base of shellcore.\n");
        ret = ESRCH;
        goto error;
    }

    //
    // Enable installing of debug packages.
    //
    for (i = 0; i < COUNT_OF(call_ofs_for__xor__eax_eax__3nop); ++i) {
        /* replace each 5-byte call with "xor eax, eax" + 3 NOPs */
        ret = proc_write_mem(p, text_seg_base + call_ofs_for__xor__eax_eax__3nop[i], 5, "\x31\xC0\x90\x90\x90", &n);
        if (ret) {
            //printf("proc_write_mem(%p) failed.\n", p);
            goto error;
        }
    }

    /* XXX: this offset corresponds to the "fake\0" string in Shellcore's memory */
    ret = proc_write_mem(p, text_seg_base + 0xC980EE /* 0x40F28 for 4.55 */, strlen("free") + 1, "free", &n);
    if (ret) {
        //printf("proc_write_mem(%p) failed.\n", p);
        goto error;
    }

    //printf("Shellcore process has been patched.\n");

error:
    if (entries)
        dealloc(entries);

    return ret;
}

Code:
#define MAX_FAKE_KEYS 32

/* One slot of the fake-EKPFS bookkeeping table. */
struct fake_key_desc {
    uint8_t key[0x20];
    int occupied;
};

/* we maintain a list of known fake keys to be able to find them and do some changes in crypto requests */
static struct fake_key_desc s_fake_keys[MAX_FAKE_KEYS];
static struct sx s_fake_keys_lock;

/* we mark our key using some pattern ("FAKEFAKEFAKEFAKE") that we can check later */
static const uint8_t s_fake_key_seed[0x10] = {
    0x46, 0x41, 0x4B, 0x45, 0x46, 0x41, 0x4B, 0x45,
    0x46, 0x41, 0x4B, 0x45, 0x46, 0x41, 0x4B, 0x45,
};

/* Claims and returns the first unoccupied slot of the fake-key table, or
   NULL when the table is full. Thread-safe (takes s_fake_keys_lock). */
static struct fake_key_desc* get_free_fake_key_slot(void) {
    struct fake_key_desc* slot = NULL;
    size_t i;

    sx_xlock(&s_fake_keys_lock);
    {
        for (i = 0; i < COUNT_OF(s_fake_keys); ++i) {
            if (!s_fake_keys[i].occupied) {
                s_fake_keys[i].occupied = 1;
                slot = s_fake_keys + i;
                break;
            }
        }
    }
    sx_xunlock(&s_fake_keys_lock);

    return slot;
}

/* Returns the table slot holding the given 0x20-byte key if it was
   registered as a fake PFS key, or NULL otherwise. Thread-safe. */
static struct fake_key_desc* is_fake_pfs_key(uint8_t* key) {
    struct fake_key_desc* slot = NULL;
    size_t i;

    sx_xlock(&s_fake_keys_lock);
    {
        for (i = 0; i < COUNT_OF(s_fake_keys); ++i) {
            if (!s_fake_keys[i].occupied)
                continue;
            if (memcmp(s_fake_keys[i].key, key, sizeof(s_fake_keys[i].key)) == 0) {
                slot = s_fake_keys + i;
                break;
            }
        }
    }
    sx_xunlock(&s_fake_keys_lock);

    return slot;
}

/* Shutdown handler: destroys the fake-key table lock. */
static void debug_pfs_cleanup(void* arg) {
    sx_destroy(&s_fake_keys_lock);
}

//...
/* these components belong to the RSA key I've generated for our own content */
static const uint8_t s_ypkg_n[0x100] = { /* TODO: paste a corresponding key here as described in a write-up, don't forget to remove a leading zero byte and correct byte order */ };
static const unsigned int s_ypkg_e = UINT32_C(0x10001);
static const uint8_t s_ypkg_d[0x100] = { /* TODO: paste a corresponding key here as described in a write-up, be careful to use correct byte order */ };
static const uint8_t s_ypkg_p[0x80] = { /* TODO: paste a corresponding key here as described in a write-up, be careful to use correct byte order */ };
static const uint8_t s_ypkg_q[0x80] = { /* TODO: paste a corresponding key here as described in a write-up, be careful to use correct byte order */ };
static const uint8_t s_ypkg_dmp1[0x80] = { /* TODO: paste a corresponding key here as described in a write-up, be careful to use correct byte order */ };
static const uint8_t s_ypkg_dmq1[0x80] = { /* TODO: paste a corresponding key here as described in a write-up, be careful to use correct byte order */ };
static const uint8_t s_ypkg_iqmp[0x80] = { /* TODO: paste a corresponding key here as described in a write-up, be careful to use correct byte order */ };

/* a common function to generate a final key for PFS:
   key = HMAC-SHA256(ekpfs, LE32(index) || seed) */
static inline void pfs_gen_crypto_key(uint8_t* ekpfs, uint8_t seed[PFS_SEED_SIZE], unsigned int index, uint8_t key[PFS_FINAL_KEY_SIZE]) {
    struct thread* td = curthread();

    uint8_t d[4 + PFS_SEED_SIZE];
    memset(d, 0, sizeof(d));

    /* an index tells which key we should generate */
    *(uint32_t*)d = LE32(index);
    memcpy(d + sizeof(uint32_t), seed, PFS_SEED_SIZE);

    /* HMAC uses SSE, so the FPU state must be saved in kernel context */
    fpu_kern_enter(td, fpu_kern_ctx, 0);
    {
        Sha256Hmac(key, d, sizeof(d), ekpfs, EKPFS_SIZE);
    }
    fpu_kern_leave(td, fpu_kern_ctx);
}

/* an encryption key generator (index 1) based on EKPFS and PFS header seed */
static inline void pfs_generate_enc_key(uint8_t* ekpfs, uint8_t seed[PFS_SEED_SIZE], uint8_t key[PFS_FINAL_KEY_SIZE]) {
    pfs_gen_crypto_key(ekpfs, seed, 1, key);
}

/* a signing key generator (index 2) based on EKPFS and PFS header seed */
static inline void pfs_generate_sign_key(uint8_t* ekpfs, uint8_t seed[PFS_SEED_SIZE], uint8_t key[PFS_FINAL_KEY_SIZE]) {
    pfs_gen_crypto_key(ekpfs, seed, 2, key);
}

/*
 * Hook for sceSblPfsKeymgrGenEKpfsForGDGPAC inside sceSblPfsKeymgrIoctl.
 * If the normal (SAMU) EKPFS derivation fails and the content is not
 * finalized, decrypts EEKPFS manually with our own RSA-2048 CRT private key
 * and records the resulting EKPFS in the fake-key table.
 */
static int sceSblPfsKeymgrIoctl__sceSblPfsKeymgrGenEKpfsForGDGPAC__hook(pfs_key_blob_t* blob) {
    struct thread* td = curthread();

    struct rsa_buffer in_data;
    struct rsa_buffer out_data;
    struct rsa_key key;
    uint8_t dec_data[EEKPFS_SIZE];
    struct fake_key_desc* fake_key_slot;
    int ret;

    /* try to decrypt EEKPFS normally */
    ret = sceSblPfsKeymgrGenEKpfsForGDGPAC(blob);
    if (ret) {
        /* if this key is for debug/fake content, we could try to decrypt it manually */
        if (!blob->finalized) {
            memset(&in_data, 0, sizeof(in_data));
            {
                in_data.ptr = blob->eekpfs;
                in_data.size = sizeof(blob->eekpfs);
            }

            memset(&out_data, 0, sizeof(out_data));
            {
                out_data.ptr = dec_data;
                out_data.size = sizeof(dec_data);
            }

            memset(&key, 0, sizeof(key));
            {
                /* here we feed a custom key to the algorithm (CRT form only) */
                key.p = (uint8_t*)s_ypkg_p;
                key.q = (uint8_t*)s_ypkg_q;
                key.dmp1 = (uint8_t*)s_ypkg_dmp1;
                key.dmq1 = (uint8_t*)s_ypkg_dmq1;
                key.iqmp = (uint8_t*)s_ypkg_iqmp;
            }

            fpu_kern_enter(td, fpu_kern_ctx, 0);
            {
                /* RSA PKCS1v15 */
                ret = RsaesPkcs1v15Dec2048CRT(&out_data, &in_data, &key);
            }
            fpu_kern_leave(td, fpu_kern_ctx);

            if (ret == 0) { /* got EKPFS key? */
                memcpy(blob->ekpfs, dec_data, sizeof(blob->ekpfs));

                /* add it to our key list */
                fake_key_slot = get_free_fake_key_slot();
                if (fake_key_slot)
                    memcpy(fake_key_slot->key, blob->ekpfs, sizeof(fake_key_slot->key));
            }
        }
    }

    return ret;
}

/*
 * Hook for sceSblPfsSetKey during PFS SBL init. After the normal call, if
 * the EKPFS came from our fake-key table, regenerates the encryption and
 * signing keys in software and marks the key entries with the fake seed so
 * ccp_msg_populate_key() can recognize them later.
 */
static int pfs_sbl_init__sceSblPfsSetKey__hook(unsigned int* ekh, unsigned int* skh, uint8_t* key, uint8_t* iv, int mode, int unused, uint8_t disc_flag) {
    struct sbl_key_rbtree_entry* key_entry;
    int is_fake_key;
    int ret;

    ret = sceSblPfsSetKey(ekh, skh, key, iv, mode, unused, disc_flag);

    /* check if it's a key that we have decrypted manually */
    is_fake_key = is_fake_pfs_key(key) != NULL;

    key_entry = sceSblKeymgrGetKey(*ekh); /* find a corresponding key entry */
    if (key_entry) {
        if (is_fake_key) {
            /* generate an encryption key */
            pfs_generate_enc_key(key, iv, key_entry->desc.pfs.key);
            memcpy(key_entry->desc.pfs.seed, s_fake_key_seed, sizeof(s_fake_key_seed));
        }
    }

    key_entry = sceSblKeymgrGetKey(*skh); /* find a corresponding key entry */
    if (key_entry) {
        if (is_fake_key) {
            /* generate a signing key */
            pfs_generate_sign_key(key, iv, key_entry->desc.pfs.key);
            memcpy(key_entry->desc.pfs.seed, s_fake_key_seed, sizeof(s_fake_key_seed));
        }
    }

    return ret;
}

//...
/*
 * Decrypts a debug/fake RIF in place with the publishing-tools AES key
 * (AES-128-CBC over RIF_DATA_SIZE bytes after the digest, IV taken from the
 * digest at the start of the buffer). Returns 0 on success or
 * SCE_SBL_ERROR_NPDRM_ENOTSUP on decryption failure.
 */
static int npdrm_decrypt_debug_rif(unsigned int type, uint8_t* data) {
    static const uint8_t rif_debug_key[0x10] = { /* TODO: place here a debug/fake RIF key */ };

    struct thread* td = curthread();
    int ret;

    fpu_kern_enter(td, fpu_kern_ctx, 0);
    {
        /* decrypt fake rif manually using a key from publishing tools */
        ret = AesCbcCfb128Decrypt(data + RIF_DIGEST_SIZE, data + RIF_DIGEST_SIZE, RIF_DATA_SIZE, rif_debug_key, sizeof(rif_debug_key) * 8, data);
        if (ret)
            ret = SCE_SBL_ERROR_NPDRM_ENOTSUP;
    }
    fpu_kern_leave(td, fpu_kern_ctx);

    return ret;
}

/*
 * Hook for sceSblKeymgrSmCallfunc handling isolated RIF decryption. Tries
 * the real SM call first; on failure, if the RIF is of the fake type
 * (0x200), decrypts it in the kernel and reports success to the caller via
 * payload->status.
 */
static int npdrm_decrypt_isolated_rif__sceSblKeymgrSmCallfunc__hook(union keymgr_payload* payload) {
    /* it's an SM request, thus we have the GPU address here, so we need to convert it to the CPU address */
    union keymgr_request* request = (union keymgr_request*)sceSblDriverGpuVaToCpuVa(payload->data, NULL);
    int ret;

    /* try to decrypt rif normally */
    ret = sceSblKeymgrSmCallfunc(payload);

    /* and if it fails then we check if it's a fake rif and try to decrypt it by ourselves */
    if ((ret != 0 || payload->status != 0) && request) {
        if (request->decrypt_rif.type == 0x200) { /* fake? */
            ret = npdrm_decrypt_debug_rif(request->decrypt_rif.type, request->decrypt_rif.data);
            payload->status = ret;
            ret = 0;
        }
    }

    return ret;
}

//...
/*
 * If key_handle refers to a key entry marked with our fake seed, writes the
 * raw 0x20-byte key over the handle field (reversed when the CCP operation
 * expects big-endian key material). Returns 1 when the key was substituted,
 * 0 otherwise.
 */
static int ccp_msg_populate_key(unsigned int key_handle, uint8_t* key, int reverse) {
    struct sbl_key_rbtree_entry* key_entry;
    uint8_t* in_key;
    int i;
    int status = 0;

    /* searching for a key entry */
    key_entry = sceSblKeymgrGetKey(key_handle);
    if (key_entry) {
        /* we have found one, now checking if it's our key */
        if (memcmp(key_entry->desc.pfs.seed, s_fake_key_seed, sizeof(key_entry->desc.pfs.seed)) == 0) {
            /* currently we have a crypto request that uses a key slot which
               should already be in CCP, but because we did everything
               manually we don't have this key slot, so we need to remove the
               use of the key slot and place a plain key here */
            in_key = key_entry->desc.pfs.key;
            if (reverse) {
                /* reverse bytes of a key if it's needed */
                for (i = 0; i < 0x20; ++i)
                    key[0x20 - i - 1] = in_key[i];
            } else {
                /* copy a key as is */
                memcpy(key, in_key, 0x20);
            }
            status = 1;
        }
    }

    return status;
}

/*
 * Inspects one CCP message; for XTS/HMAC operations that reference a key
 * handle belonging to a fake key, substitutes the plain key and clears the
 * CCP_USE_KEY_HANDLE flag. Returns 1 when a substitution was made.
 */
static int ccp_msg_populate_key_if_needed(struct ccp_msg* msg) {
    unsigned int cmd = msg->op.common.cmd;
    unsigned int type = CCP_OP(cmd);
    uint8_t* buf;
    int status = 0;

    /* skip messages that use plain keys and key slots */
    if (!(cmd & CCP_USE_KEY_HANDLE))
        goto skip;

    buf = (uint8_t*)&msg->op;

    /* we only need to handle xts/hmac crypto operations */
    switch (type) {
    case CCP_OP_XTS:
        status = ccp_msg_populate_key(*(uint32_t*)(buf + 0x28), buf + 0x28, 1); /* xts keys have a reversed byte order */
        break;
    case CCP_OP_HMAC:
        status = ccp_msg_populate_key(*(uint32_t*)(buf + 0x40), buf + 0x40, 0); /* hmac keys have a normal byte order */
        break;
    default:
        goto skip;
    }

    /* if a key was successfully populated, then remove the flag which tells CCP to use a key slot */
    if (status)
        msg->op.common.cmd &= ~CCP_USE_KEY_HANDLE;

skip:
    return status;
}

/*
 * Hook for sceSblServiceCryptAsync used by the various pfs_* crypto paths:
 * rewrites fake-key messages in the request, then forwards it unchanged.
 */
static int pfs_crypto__sceSblServiceCryptAsync__hook(struct ccp_req* request) {
    struct ccp_msg* msg;
    int ret;

    TAILQ_FOREACH(msg, &request->msgs, next) {
        /* handle each message in the crypto request */
        ccp_msg_populate_key_if_needed(msg);
    }

    /* run the crypto function normally */
    ret = sceSblServiceCryptAsync(request);

    return ret;
}

//...

/* All pfs_* crypto call sites share the same hook implementation. */
#define pfs_generate_icv_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook
#define pfs_generate_icv_async_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook
#define pfs_dec_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook
#define pfs_dec_icv_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook
#define pfs_dec_icv_async_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook
#define pfs_enc_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook
#define pfs_icv_enc_sub__sceSblServiceCryptAsync__hook pfs_crypto__sceSblServiceCryptAsync__hook

/*
 * Installs the debug-PFS patches: initializes the fake-key table and lock,
 * registers cleanup at shutdown, and redirects the relevant kernel call
 * sites. All offsets are firmware-specific (see TODOs).
 */
static void do_debug_pfs_patches(void) {
    memset(s_fake_keys, 0, sizeof(s_fake_keys));
    sx_init(&s_fake_keys_lock, "fake_keys_lock");

    EVENTHANDLER_REGISTER(shutdown_pre_sync, &debug_pfs_cleanup, NULL, 0);

    /* TODO: need to change a slide of "call sceSblKeymgrSmCallfunc" instruction inside npdrm_decrypt_isolated_rif() */
    INSTALL_CALL_HOOK(0x62DF00, npdrm_decrypt_isolated_rif__sceSblKeymgrSmCallfunc__hook);

    /* TODO: need to change slides of "call sceSblPfsKeymgrGenEKpfsForGDGPAC" instructions inside sceSblPfsKeymgrIoctl() */
    INSTALL_CALL_HOOK(0x607045, sceSblPfsKeymgrIoctl__sceSblPfsKeymgrGenEKpfsForGDGPAC__hook);
    INSTALL_CALL_HOOK(0x6070E1, sceSblPfsKeymgrIoctl__sceSblPfsKeymgrGenEKpfsForGDGPAC__hook);

    /* TODO: need to change slides of "call sceSblPfsSetKey" instructions inside pfs_sbl_init_sub() and pfs_sbl_init() */
    INSTALL_CALL_HOOK(0x69DB4A, pfs_sbl_init__sceSblPfsSetKey__hook);
    INSTALL_CALL_HOOK(0x69DBD8, pfs_sbl_init__sceSblPfsSetKey__hook);

    /* TODO: need to change slides of "call sceSblServiceCryptAsync" instructions inside corresponding pfs_*_sub() functions (easy to find by looking at string references that contains function names) */
    INSTALL_CALL_HOOK(0x69DDE4, pfs_generate_icv_sub__sceSblServiceCryptAsync__hook); // hmac
    INSTALL_CALL_HOOK(0x69E28C, pfs_generate_icv_async_sub__sceSblServiceCryptAsync__hook); // hmac
    INSTALL_CALL_HOOK(0x69E4E8, pfs_dec_sub__sceSblServiceCryptAsync__hook); // xts
    INSTALL_CALL_HOOK(0x69E85D, pfs_dec_icv_sub__sceSblServiceCryptAsync__hook); // hmac, xts
    INSTALL_CALL_HOOK(0x69EC7E, pfs_dec_icv_async_sub__sceSblServiceCryptAsync__hook); // hmac, xts
    INSTALL_CALL_HOOK(0x69EF0D, pfs_enc_sub__sceSblServiceCryptAsync__hook); // xts
    INSTALL_CALL_HOOK(0x69F252, pfs_icv_enc_sub__sceSblServiceCryptAsync__hook); // hmac, xts
}

Code: // Disable RSA signature check for PFS. kernel_text_base + 0x69F4E0: 55 48 89 E5 -> 31 C0 C3 90 // Enable debug RIFs. kernel_text_base + 0x62D30D: E8 0E 04 00 00 EB 38 3D -> B8 01 00 00 00 EB 38 3D

Code: #define JOIN_HELPER(x, y) x##y #define JOIN(x, y) JOIN_HELPER(x, y) //... #define TYPE_PAD(size) char JOIN(_pad_, __COUNTER__)[size] #define TYPE_VARIADIC_BEGIN(name) name { union { #define TYPE_BEGIN(name, size) name { union { TYPE_PAD(size) #define TYPE_END(...) }; } __VA_ARGS__ #define TYPE_FIELD(field, offset) struct { TYPE_PAD(offset); field; } #define TYPE_CHECK_SIZE(name, size) \ _Static_assert(sizeof(name) == (size), "Size of " #name " != " #size) #define TYPE_CHECK_FIELD_OFFSET(name, member, offset) \ _Static_assert(offsetof(name, member) == (offset), "Offset of " #name "." #member " != " #offset) #define TYPE_CHECK_FIELD_SIZE(name, member, size) \ _Static_assert(sizeof(((name*)0)->member) == (size), "Size of " #name "." #member " != " #size) Okay, so let's summarize all needed structures (if I forgot some, just ping me): #define PAGE_SIZE 0x4000 //... struct fpu_kern_ctx; /* TODO: you should point it to its location in a kernel's memory (grab this variable from some function that does crypto operations, for example, sceSblSsGenerateSealedKey()) */ struct fpu_kern_ctx* fpu_kern_ctx; static inline struct thread* curthread(void) { struct thread* td; __asm__ __volatile__ ( "mov %%gs:0, %0" : "=r"(td) ); return td; } //... #define SCE_SBL_ERROR_NPDRM_ENOTSUP 0x800F0A25 //... 
#define ELF_IDENT_SIZE 0x10 #define ELF_EHDR_EXT_SIZE 0x1000 #define ELF_IDENT_MAG0 0 #define ELF_IDENT_MAG1 1 #define ELF_IDENT_MAG2 2 #define ELF_IDENT_MAG3 3 #define ELF_IDENT_CLASS 4 #define ELF_IDENT_DATA 5 #define ELF_CLASS_64 2 #define ELF_DATA_LSB 1 #define ELF_TYPE_NONE 0 #define ELF_TYPE_EXEC 2 #define ELF_MACHINE_X86_64 0x3E #define ELF_PHDR_TYPE_NULL 0x0 #define ELF_PHDR_TYPE_LOAD 0x1 #define ELF_PHDR_TYPE_SCE_DYNLIBDATA 0x61000000 #define ELF_PHDR_TYPE_SCE_RELRO 0x61000010 #define ELF_PHDR_TYPE_SCE_COMMENT 0x6FFFFF00 #define ELF_PHDR_TYPE_SCE_VERSION 0x6FFFFF01 #define ELF_PHDR_FLAG_X 0x1 #define ELF_PHDR_FLAG_W 0x2 #define ELF_PHDR_FLAG_R 0x4 #define ELF_ET_EXEC 0x2 #define ELF_ET_SCE_EXEC 0xFE00 #define ELF_ET_SCE_EXEC_ASLR 0xFE10 #define ELF_ET_SCE_DYNAMIC 0xFE18 typedef uint16_t elf64_half_t; typedef uint32_t elf64_word_t; typedef uint64_t elf64_xword_t; typedef uint64_t elf64_off_t; typedef uint64_t elf64_addr_t; struct elf64_ehdr { uint8_t ident[ELF_IDENT_SIZE]; elf64_half_t type; elf64_half_t machine; elf64_word_t version; elf64_addr_t entry; elf64_off_t phoff; elf64_off_t shoff; elf64_word_t flags; elf64_half_t ehsize; elf64_half_t phentsize; elf64_half_t phnum; elf64_half_t shentsize; elf64_half_t shnum; elf64_half_t shstrndx; }; struct elf64_phdr { elf64_word_t type; elf64_word_t flags; elf64_off_t offset; elf64_addr_t vaddr; elf64_addr_t paddr; elf64_xword_t filesz; elf64_xword_t memsz; elf64_xword_t align; }; struct elf64_shdr { elf64_word_t name; elf64_word_t type; elf64_xword_t flags; elf64_addr_t addr; elf64_off_t offset; elf64_xword_t size; elf64_word_t link; elf64_word_t info; elf64_xword_t addralign; elf64_xword_t entsize; }; //... 
#define SELF_DIGEST_SIZE 0x20 #define SELF_CONTENT_ID_SIZE 0x13 #define SELF_RANDOM_PAD_SIZE 0x0D #define SELF_MAX_HEADER_SIZE 0x4000 enum self_format { SELF_FORMAT_NONE, SELF_FORMAT_ELF, SELF_FORMAT_SELF, }; #define SIZEOF_SELF_PAGER 0x100 // XXX: random, don't use directly without fixing it TYPE_BEGIN(struct self_pager, SIZEOF_SELF_PAGER); TYPE_FIELD(struct mtx lock, 0x00); TYPE_END(); #define SIZEOF_SELF_INFO 0x100 // XXX: random, don't use directly without fixing it TYPE_BEGIN(struct self_info, SIZEOF_SELF_INFO); TYPE_FIELD(struct vnode* vp, 0x20); TYPE_FIELD(struct self_pager* pager, 0x28); TYPE_FIELD(uint8_t* header, 0x38); TYPE_FIELD(int ctx_id, 0x40); TYPE_END(); #define SIZEOF_SELF_CONTEXT 0x60 // sceSblAuthMgrAuthHeader:bzero(sbl_authmgr_context, 0x60) TYPE_BEGIN(struct self_context, SIZEOF_SELF_CONTEXT); TYPE_FIELD(enum self_format format, 0x00); TYPE_FIELD(int elf_auth_type, 0x04); /* auth id is based on that */ TYPE_FIELD(unsigned int total_header_size, 0x08); TYPE_FIELD(int ctx_id, 0x1C); TYPE_FIELD(uint64_t svc_id, 0x20); TYPE_FIELD(int buf_id, 0x30); TYPE_FIELD(uint8_t* header, 0x38); TYPE_FIELD(struct mtx lock, 0x40); TYPE_END(); #define SIZEOF_SELF_HEADER 0x20 TYPE_BEGIN(struct self_header, SIZEOF_SELF_HEADER); TYPE_FIELD(uint32_t magic, 0x00); #define SELF_MAGIC 0x1D3D154F #define ELF_MAGIC 0x464C457F TYPE_FIELD(uint8_t version, 0x04); TYPE_FIELD(uint8_t mode, 0x05); TYPE_FIELD(uint8_t endian, 0x06); TYPE_FIELD(uint8_t attr, 0x07); TYPE_FIELD(uint32_t key_type, 0x08); TYPE_FIELD(uint16_t header_size, 0x0C); TYPE_FIELD(uint16_t meta_size, 0x0E); TYPE_FIELD(uint64_t file_size, 0x10); TYPE_FIELD(uint16_t num_entries, 0x18); TYPE_FIELD(uint16_t flags, 0x1A); TYPE_END(); #define SIZEOF_SELF_ENTRY 0x20 TYPE_BEGIN(struct self_entry, SIZEOF_SELF_ENTRY); TYPE_FIELD(uint64_t props, 0x00); TYPE_FIELD(uint64_t offset, 0x08); TYPE_FIELD(uint64_t file_size, 0x10); TYPE_FIELD(uint64_t memory_size, 0x18); TYPE_END(); #define SIZEOF_SELF_EX_INFO 0x40 
/* Extended SELF info, stored in the SELF header area after the ELF data. */
TYPE_BEGIN(struct self_ex_info, SIZEOF_SELF_EX_INFO);
	TYPE_FIELD(uint64_t paid, 0x00);  /* program authority id */
	TYPE_FIELD(uint64_t ptype, 0x08); /* program type */
#define SELF_PTYPE_FAKE 0x1
	TYPE_FIELD(uint64_t app_version, 0x10);
	TYPE_FIELD(uint64_t fw_version, 0x18);
	TYPE_FIELD(uint8_t digest[SELF_DIGEST_SIZE], 0x20);
TYPE_END();

#define SIZEOF_SELF_AUTH_INFO 0x88 // sceSblAuthMgrIsLoadable2:bzero(auth_info, 0x88)
/* Authentication info: authority id plus capability/attribute bitmaps. */
TYPE_BEGIN(struct self_auth_info, SIZEOF_SELF_AUTH_INFO);
	TYPE_FIELD(uint64_t paid, 0x00);
	TYPE_FIELD(uint64_t caps[4], 0x08);
	TYPE_FIELD(uint64_t attrs[4], 0x28);
	TYPE_FIELD(uint8_t unk[0x40], 0x48);
TYPE_END();

#define SIZEOF_SELF_FAKE_AUTH_INFO (sizeof(uint64_t) + SIZEOF_SELF_AUTH_INFO)
/* Auth info as embedded in a fake SELF: a size field followed by the payload
   (see sceSblAuthMgrGetSelfAuthInfoFake in the article head). */
TYPE_BEGIN(struct self_fake_auth_info, SIZEOF_SELF_FAKE_AUTH_INFO);
	TYPE_FIELD(uint64_t size, 0x00);
	TYPE_FIELD(struct self_auth_info info, 0x08);
TYPE_END();

int sceSblAuthMgrGetSelfInfo(struct self_context* ctx, struct self_ex_info** info);
int sceSblAuthMgrIsLoadable2(struct self_context* ctx, struct self_auth_info* old_auth_info, int path_id, struct self_auth_info* new_auth_info);
void sceSblAuthMgrSmStart(void);
int sceSblAuthMgrSmVerifyHeader(struct self_context* ctx);

/* Returns a pointer to the ELF header embedded in `ctx`.
 * Plain ELF: the context header IS the ELF header. SELF: the ELF header
 * follows the SELF header and its segment entry table.
 * Returns 0 on success, -37 if the header data is malformed/too small,
 * -35 for an unknown format. */
static inline int sceSblAuthMgrGetElfHeader(struct self_context* ctx, struct elf64_ehdr** ehdr) {
	struct self_header* self_hdr;
	struct elf64_ehdr* elf_hdr;
	size_t pdata_size;

	if (ctx->format == SELF_FORMAT_ELF) {
		elf_hdr = (struct elf64_ehdr*)ctx->header;
		if (ehdr)
			*ehdr = elf_hdr;
		return 0;
	} else if (ctx->format == SELF_FORMAT_SELF) {
		self_hdr = (struct self_header*)ctx->header;
		/* bytes between the end of the entry table and header_size */
		pdata_size = self_hdr->header_size - sizeof(struct self_entry) * self_hdr->num_entries - sizeof(struct self_header);
		/* NOTE(review): if header_size/num_entries are inconsistent this
		   subtraction wraps to a huge size_t and the >= check passes; only
		   the 16-byte alignment test would then reject it — confirm that
		   callers validate the header first. */
		if (pdata_size >= sizeof(struct elf64_ehdr) && (pdata_size & 0xF) == 0) {
			elf_hdr = (struct elf64_ehdr*)((uint8_t*)self_hdr + sizeof(struct self_header) + sizeof(struct self_entry) * self_hdr->num_entries);
			if (ehdr)
				*ehdr = elf_hdr;
			return 0;
		}
		return -37;
	}
	return -35;
}

int kern_get_self_auth_info(struct thread* td, const char* path, enum uio_seg pathseg, struct self_auth_info* info);

//...

#define CONTENT_KEY_SEED_SIZE 0x10
#define SELF_KEY_SEED_SIZE 0x10
#define EEKC_SIZE 0x20

/* Encrypted key container: per-content and per-SELF key seeds (EEKC_SIZE bytes total). */
struct ekc {
	uint8_t content_key_seed[CONTENT_KEY_SEED_SIZE];
	uint8_t self_key_seed[SELF_KEY_SEED_SIZE];
};

#define SIZEOF_SBL_KEY_DESC 0x7C // sceSblKeymgrSetKey
/* Key descriptor passed to sceSblKeymgrSetKey; only the PFS layout is known here. */
union sbl_key_desc {
	struct {
		uint16_t cmd;
		uint16_t pad;
		uint8_t key[0x20];
		uint8_t seed[0x10];
	} pfs;
	//...
	uint8_t raw[SIZEOF_SBL_KEY_DESC];
};
TYPE_CHECK_SIZE(union sbl_key_desc, SIZEOF_SBL_KEY_DESC);

#define SIZEOF_SBL_KEY_RBTREE_ENTRY 0xA8 // sceSblKeymgrSetKey
#define TYPE_SBL_KEY_RBTREE_ENTRY_DESC_OFFSET 0x04
#define TYPE_SBL_KEY_RBTREE_ENTRY_LOCKED_OFFSET 0x80
/* Node of the key manager's key tree, keyed by `handle`. */
TYPE_BEGIN(struct sbl_key_rbtree_entry, SIZEOF_SBL_KEY_RBTREE_ENTRY);
	TYPE_FIELD(uint32_t handle, 0x00);
	TYPE_FIELD(union sbl_key_desc desc, TYPE_SBL_KEY_RBTREE_ENTRY_DESC_OFFSET);
	TYPE_FIELD(uint32_t locked, TYPE_SBL_KEY_RBTREE_ENTRY_LOCKED_OFFSET);
	TYPE_FIELD(struct sbl_key_rbtree_entry* left, 0x88);
	TYPE_FIELD(struct sbl_key_rbtree_entry* right, 0x90);
	TYPE_FIELD(struct sbl_key_rbtree_entry* parent, 0x98);
	TYPE_FIELD(uint32_t set, 0xA0);
TYPE_END();

#define RIF_DIGEST_SIZE 0x10
#define RIF_DATA_SIZE 0x90
#define RIF_KEY_TABLE_SIZE 0x230
#define RIF_MAX_KEY_SIZE 0x20
#define RIF_PAYLOAD_SIZE (RIF_DIGEST_SIZE + RIF_DATA_SIZE)

#define SIZEOF_ACTDAT 0x200
/* act.dat — console activation data. Gaps between offsets are unknown/padding. */
TYPE_BEGIN(struct actdat, SIZEOF_ACTDAT);
	TYPE_FIELD(uint32_t magic, 0x00);
	TYPE_FIELD(uint16_t version_major, 0x04);
	TYPE_FIELD(uint16_t version_minor, 0x06);
	TYPE_FIELD(uint64_t account_id, 0x08);
	TYPE_FIELD(uint64_t start_time, 0x10);
	TYPE_FIELD(uint64_t end_time, 0x18);
	TYPE_FIELD(uint64_t flags, 0x20);
	TYPE_FIELD(uint32_t unk3, 0x28);
	TYPE_FIELD(uint32_t unk4, 0x2C);
	TYPE_FIELD(uint8_t open_psid_hash[0x20], 0x60);
	TYPE_FIELD(uint8_t static_per_console_data_1[0x20], 0x80);
	TYPE_FIELD(uint8_t digest[0x10], 0xA0);
	TYPE_FIELD(uint8_t key_table[0x20], 0xB0);
	TYPE_FIELD(uint8_t static_per_console_data_2[0x10], 0xD0);
	TYPE_FIELD(uint8_t static_per_console_data_3[0x20], 0xE0);
	TYPE_FIELD(uint8_t signature[0x100], 0x100);
TYPE_END();

#define SIZEOF_RIF 0x400
/* RIF — rights information file (license). */
TYPE_BEGIN(struct rif, SIZEOF_RIF);
	TYPE_FIELD(uint32_t magic, 0x00);
	TYPE_FIELD(uint16_t version_major, 0x04);
	TYPE_FIELD(uint16_t version_minor, 0x06);
	TYPE_FIELD(uint64_t account_id, 0x08);
	TYPE_FIELD(uint64_t start_time, 0x10);
	TYPE_FIELD(uint64_t end_time, 0x18);
	TYPE_FIELD(char content_id[0x30], 0x20);
	TYPE_FIELD(uint16_t format, 0x50); /* 0x200 marks a fake/debug rif in later code */
	TYPE_FIELD(uint16_t drm_type, 0x52);
	TYPE_FIELD(uint16_t content_type, 0x54);
	TYPE_FIELD(uint16_t sku_flag, 0x56);
	TYPE_FIELD(uint64_t content_flags, 0x58);
	TYPE_FIELD(uint32_t iro_tag, 0x60);
	TYPE_FIELD(uint32_t ekc_version, 0x64);
	TYPE_FIELD(uint16_t unk3, 0x6A);
	TYPE_FIELD(uint16_t unk4, 0x6C);
	TYPE_FIELD(uint8_t digest[0x10], 0x260);
	TYPE_FIELD(uint8_t data[RIF_DATA_SIZE], 0x270);
	TYPE_FIELD(uint8_t signature[0x100], 0x300);
TYPE_END();

/* Mailbox payload exchanged with the keymgr secure module.
   NOTE(review): a later snippet accesses payload->mapped_buf — presumably
   another name for `data` in a fuller definition; confirm offsets. */
union keymgr_payload {
	struct {
		uint32_t cmd;
		uint32_t status;
		uint64_t data;
	};
	uint8_t buf[0x80];
};

union keymgr_request {
	struct {
		uint32_t type;
		uint8_t key[RIF_MAX_KEY_SIZE];
		uint8_t data[RIF_DIGEST_SIZE + RIF_DATA_SIZE];
	} decrypt_rif;
};

union keymgr_response {
	struct {
		uint32_t type;
		uint8_t key[RIF_MAX_KEY_SIZE];
		uint8_t data[RIF_DIGEST_SIZE + RIF_DATA_SIZE];
	} decrypt_rif;
};

/* TODO: you should point it to its location in a kernel's memory (see a code
   of sceSblKeymgrSetKey(), just before a loop there is something like this:
   mov rdx, cs:xxx go there, it's your target */
struct sbl_key_rbtree_entry** sbl_keymgr_key_rbtree;

/* Searches the key manager's tree for `handle` — a plain binary-search
   descent (handle < node goes right, > goes left); returns NULL if absent. */
static inline struct sbl_key_rbtree_entry* sceSblKeymgrGetKey(unsigned int handle) {
	struct sbl_key_rbtree_entry* entry = *sbl_keymgr_key_rbtree;

	while (entry) {
		if (entry->handle < handle)
			entry = entry->right;
		else if (entry->handle > handle)
			entry = entry->left;
		else if (entry->handle == handle)
			return entry;
	}

	return NULL;
}

int sceSblKeymgrSmCallfunc(union keymgr_payload* payload);

//...
#define EKPFS_SIZE 0x20 #define EEKPFS_SIZE 0x100 #define PFS_SEED_SIZE 0x10 #define PFS_FINAL_KEY_SIZE 0x20 #define SIZEOF_PFS_KEY_BLOB 0x158 struct pfs_key_blob { uint8_t ekpfs[EKPFS_SIZE]; uint8_t eekpfs[EEKPFS_SIZE]; struct ekc eekc; uint32_t key_ver; uint32_t pubkey_ver; uint32_t type; uint32_t finalized; uint32_t is_disc; uint32_t pad; }; typedef struct pfs_key_blob pfs_key_blob_t; TYPE_CHECK_SIZE(pfs_key_blob_t, SIZEOF_PFS_KEY_BLOB); int sceSblPfsKeymgrGenEKpfsForGDGPAC(struct pfs_key_blob* key_blob); int sceSblPfsSetKey(uint32_t* ekh, uint32_t* skh, uint8_t* key, uint8_t* iv, int type, int unused, uint8_t is_disc); int sceSblPfsClearKey(uint32_t ekh, uint32_t skh); //... #define CCP_MAX_PAYLOAD_SIZE 0x88 #define CCP_OP(cmd) (cmd >> 24) #define CCP_OP_XTS 2 #define CCP_OP_HMAC 9 #define CCP_USE_KEY_HANDLE (1 << 20) struct ccp_link { void* p; }; struct ccp_msg { union ccp_op op; uint32_t index; uint32_t result; TAILQ_ENTRY(ccp_msg) next; uint64_t message_id; LIST_ENTRY(ccp_link) links; }; struct ccp_req { TAILQ_HEAD(, ccp_msg) msgs; void (*cb)(void* arg, int result); void* arg; uint64_t message_id; LIST_ENTRY(ccp_link) links; }; union ccp_op { struct { uint32_t cmd; uint32_t status; } common; //... uint8_t buf[CCP_MAX_PAYLOAD_SIZE]; }; //... #define SBL_MSG_SERVICE_MAILBOX_MAX_SIZE 0x80 int sceSblServiceMailbox(unsigned long service_id, uint8_t request[SBL_MSG_SERVICE_MAILBOX_MAX_SIZE], uint8_t response); int sceSblServiceCrypt(struct ccp_req* request); int sceSblServiceCryptAsync(struct ccp_req* request); //... 
struct sbl_mapped_page_group; #define SIZEOF_SBL_MAP_LIST_ENTRY 0x50 // sceSblDriverMapPages TYPE_BEGIN(struct sbl_map_list_entry, SIZEOF_SBL_MAP_LIST_ENTRY); TYPE_FIELD(struct sbl_map_list_entry* next, 0x00); TYPE_FIELD(struct sbl_map_list_entry* prev, 0x08); TYPE_FIELD(unsigned long cpu_va, 0x10); TYPE_FIELD(unsigned int num_page_groups, 0x18); TYPE_FIELD(unsigned long gpu_va, 0x20); TYPE_FIELD(struct sbl_mapped_page_group* page_groups, 0x28); TYPE_FIELD(unsigned int num_pages, 0x30); TYPE_FIELD(unsigned long flags, 0x38); TYPE_FIELD(struct proc* proc, 0x40); TYPE_FIELD(void* vm_page, 0x48); TYPE_END(); static inline struct sbl_map_list_entry* sceSblDriverFindMappedPageListByGpuVa(vm_offset_t gpu_va) { struct sbl_map_list_entry* entry; if (!gpu_va) return NULL; entry = *sbl_driver_mapped_pages; while (entry) { if (entry->gpu_va == gpu_va) return entry; entry = entry->next; } return NULL; } /* TODO: you should point it to its location in a kernel's memory (see a code of sceSblDriverGvmInitialize(), just before a call to mtx_init() there is something like this: mov cs:xxx, 0 go there, it's your target */ struct sbl_map_list_entry** sbl_driver_mapped_pages; static inline struct sbl_map_list_entry* sceSblDriverFindMappedPageListByCpuVa(vm_offset_t cpu_va) { struct sbl_map_list_entry* entry; if (!cpu_va) return NULL; entry = *sbl_driver_mapped_pages; while (entry) { if (entry->cpu_va == cpu_va) return entry; entry = entry->next; } return NULL; } static inline vm_offset_t sceSblDriverGpuVaToCpuVa(vm_offset_t gpu_va, size_t* num_page_groups) { struct sbl_map_list_entry* entry = sceSblDriverFindMappedPageListByGpuVa(gpu_va); if (!entry) return 0; if (num_page_groups) *num_page_groups = entry->num_page_groups; return entry->cpu_va; } //... 
/* A (pointer, length) pair for RSA input/output buffers. */
struct rsa_buffer {
	uint8_t* ptr;
	size_t size;
};

#define SIZEOF_RSA_KEY 0x48

/* RSA private key in CRT form; the leading 0x20 bytes are unknown fields. */
TYPE_BEGIN(struct rsa_key, SIZEOF_RSA_KEY);
	TYPE_FIELD(uint8_t* p, 0x20);
	TYPE_FIELD(uint8_t* q, 0x28);
	TYPE_FIELD(uint8_t* dmp1, 0x30);
	TYPE_FIELD(uint8_t* dmq1, 0x38);
	TYPE_FIELD(uint8_t* iqmp, 0x40);
TYPE_END();

/* Crypto primitives implemented elsewhere in the payload. */
int AesCbcCfb128Encrypt(uint8_t* out, const uint8_t* in, size_t data_size, const uint8_t* key, int key_size, uint8_t* iv);
int AesCbcCfb128Decrypt(uint8_t* out, const uint8_t* in, size_t data_size, const uint8_t* key, int key_size, uint8_t* iv);
void Sha256Hash(uint8_t hash[0x20], const uint8_t* data, size_t data_size);
void Sha256Hmac(uint8_t hash[0x20], const uint8_t* data, size_t data_size, const uint8_t* key, int key_size);
int RsaesPkcs1v15Enc2048(struct rsa_buffer* out, struct rsa_buffer* in, struct rsa_key* key);
int RsaesPkcs1v15Dec2048CRT(struct rsa_buffer* out, struct rsa_buffer* in, struct rsa_key* key);

//...

/* Appends a (name, value) pair to a growing iovec list, nmount(2) style.
 * `*iovlen` is the current pair-element count; it is set to -1 on allocation
 * failure and all subsequent calls become no-ops. `len == (size_t)-1` means
 * "treat val as a NUL-terminated string" (or 0 bytes when val is NULL).
 *
 * Fixes vs. the original:
 *  - realloc result goes through a temporary, so the old list is not leaked
 *    (and remains freeable by the caller) when realloc fails;
 *  - strdup failure is detected instead of storing NULL with a nonzero length;
 *  - the bogus (int) cast into the size_t iov_len is removed.
 */
void build_iovec(struct iovec** iov, int* iovlen, const char* name, const void* val, size_t len) {
	struct iovec* grown;
	char* name_copy;
	int i;

	if (*iovlen < 0)
		return;

	i = *iovlen;

	grown = realloc(*iov, sizeof **iov * (i + 2));
	if (grown == NULL) {
		*iovlen = -1; /* old *iov left intact so the caller can still free it */
		return;
	}
	*iov = grown;

	name_copy = strdup(name);
	if (name_copy == NULL) {
		*iovlen = -1;
		return;
	}

	(*iov)[i].iov_base = name_copy;
	(*iov)[i].iov_len = strlen(name) + 1;
	++i;

	(*iov)[i].iov_base = (void*)val;
	if (len == (size_t)-1) {
		if (val != NULL)
			len = strlen(val) + 1;
		else
			len = 0;
	}
	(*iov)[i].iov_len = len;

	*iovlen = ++i;
}

//...
/* Finds a process by its p_comm name. Takes allproc_lock shared for the scan
   and momentarily locks each candidate; the returned proc is UNLOCKED, so the
   caller must re-lock before touching mutable state. Returns NULL if absent. */
struct proc* proc_find_by_name(const char* name) {
	struct proc* p;

	if (!name)
		return NULL;

	sx_slock(allproc_lock);

	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);

		if (strncmp(p->p_comm, name, sizeof(p->p_comm)) == 0) {
			PROC_UNLOCK(p);
			goto done; /* p still points at the match */
		}

		PROC_UNLOCK(p);
	}

	p = NULL;

done:
	sx_sunlock(allproc_lock);

	return p;
}

/* Snapshots a process's VM map into a newly allocated array.
 * On success returns 0, stores the array in *entries (ownership transfers to
 * the caller, freed with dealloc()) and its length in *num_entries; an empty
 * map yields *entries == NULL, *num_entries == 0.
 * Errors: EINVAL on NULL arguments, ESRCH if the process is exiting or its
 * vmspace is gone, ENOMEM on allocation failure.
 * Submap entries are skipped in both the counting and the filling pass; the
 * map read-lock is held across both, so the counts match. */
int proc_get_vm_map(struct proc* p, struct proc_vm_map_entry** entries, size_t* num_entries) {
	struct vmspace* vm;
	struct proc_vm_map_entry* info = NULL;
	vm_map_t map;
	vm_map_entry_t entry;
	size_t n, i;
	int ret;

	if (!p) {
		ret = EINVAL;
		goto error;
	}
	if (!entries) {
		ret = EINVAL;
		goto error;
	}
	if (!num_entries) {
		ret = EINVAL;
		goto error;
	}

	PROC_LOCK(p);
	if (p->p_flag & P_WEXIT) { /* process is already exiting — nothing to map */
		PROC_UNLOCK(p);
		ret = ESRCH;
		goto error;
	}
	_PHOLD(p); /* hold the proc so it can't disappear while we walk its map */
	PROC_UNLOCK(p);

	vm = vmspace_acquire_ref(p);
	if (!vm) {
		PRELE(p);
		ret = ESRCH;
		goto error;
	}
	map = &vm->vm_map;

	vm_map_lock_read(map);

	/* first pass: count the entries we will report */
	for (entry = map->header.next, n = 0; entry != &map->header; entry = entry->next) {
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;
		++n;
	}
	if (n == 0)
		goto done;

	info = (struct proc_vm_map_entry*)alloc(n * sizeof(*info));
	if (!info) {
		vm_map_unlock_read(map);
		vmspace_free(vm);
		PRELE(p);
		ret = ENOMEM;
		goto error;
	}

	memset(info, 0, n * sizeof(*info));

	/* second pass: fill in ranges and translate VM_PROT_* to PROT_* bits */
	for (entry = map->header.next, i = 0; entry != &map->header; entry = entry->next) {
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			continue;

		info[i].start = entry->start;
		info[i].end = entry->end;
		info[i].offset = entry->offset;

		info[i].prot = 0;
		if (entry->protection & VM_PROT_READ)
			info[i].prot |= PROT_READ;
		if (entry->protection & VM_PROT_WRITE)
			info[i].prot |= PROT_WRITE;
		if (entry->protection & VM_PROT_EXECUTE)
			info[i].prot |= PROT_EXEC;

		++i;
	}

done:
	vm_map_unlock_read(map);
	vmspace_free(vm);
	PRELE(p);

	*num_entries = n;
	*entries = info;

	info = NULL; /* ownership moved to the caller; skip the cleanup below */
	ret = 0;

error:
	if (info)
		dealloc(info);

	return ret;
}

/* Reads or writes `size` bytes of another process's memory at `ptr` via
 * proc_rwmem() and a single SYSSPACE iovec. On return *n (if given) holds the
 * number of bytes actually transferred. write != 0 selects UIO_WRITE. */
int proc_rw_mem(struct proc* p, void* ptr, size_t size, void* data, size_t* n, int write) {
	struct thread* td = curthread();
	struct iovec iov;
	struct uio uio;
	int ret;

	if (!p) {
		ret = EINVAL;
		goto error;
	}

	if (size == 0) { /* zero-length transfer: trivially successful */
		if (n)
			*n = 0;
		ret = 0;
		goto error;
	}

	memset(&iov, 0, sizeof(iov));
	iov.iov_base = (caddr_t)data;
	iov.iov_len = size;

	memset(&uio, 0, sizeof(uio));
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)ptr; /* target address in p's address space */
	uio.uio_resid = (ssize_t)size;
	uio.uio_segflg = UIO_SYSSPACE; /* `data` is a kernel buffer */
	uio.uio_rw = write ? UIO_WRITE : UIO_READ;
	uio.uio_td = td;

	ret = proc_rwmem(p, &uio);
	if (n)
		*n = (size_t)((ssize_t)size - uio.uio_resid); /* bytes transferred */

error:
	return ret;
}

//...

/* Convenience wrapper: write into another process's memory. */
static inline int proc_write_mem(struct proc* p, void* ptr, size_t size, void* data, size_t* n) {
	return proc_rw_mem(p, ptr, size, data, n, 1);
}

sfo.h

sfo.c

playgo.h

playgo.c

gp4.h

gp4.c

pkg.h

pkg.c

/* Code: */
//...

#define RIF_KEY_TABLE_SIZE 0x230
#define SIZEOF_RIF 0x400

/* EKC plus the decrypted entitlement key for a fake rif. */
struct rif_key_blob {
	struct ekc ekc;
	uint8_t entitlement_key[0x10];
};

/* Request/response layouts shared with the keymgr secure module; this chunk
   extends them with the decrypt_entire_rif command used by newer firmware. */
union keymgr_request {
	struct {
		uint32_t type;
		uint8_t key[RIF_MAX_KEY_SIZE];
		uint8_t data[RIF_DIGEST_SIZE + RIF_DATA_SIZE];
	} decrypt_rif;

	struct {
		struct rif rif;
		uint8_t key_table[RIF_KEY_TABLE_SIZE];
		uint64_t timestamp;
		int status;
	} decrypt_entire_rif;
};

union keymgr_response {
	struct {
		uint32_t type;
		uint8_t key[RIF_MAX_KEY_SIZE];
		uint8_t data[RIF_DIGEST_SIZE + RIF_DATA_SIZE];
	} decrypt_rif;

	struct {
		uint8_t raw[SIZEOF_RIF];
	} decrypt_entire_rif;
};

//...

/* Hook around the keymgr SM call that decrypts an entire rif: first let the
 * secure module try; if it rejects the rif and the rif is a fake one
 * (format == 0x200 big-endian), decrypt it ourselves and write the result
 * where the kernel expects it.
 * NOTE(review): `payload->mapped_buf` is not a member of the keymgr_payload
 * union shown earlier in this article (which has `data`) — presumably the
 * fuller definition names that field differently; confirm offsets.
 * NOTE(review): `eekc` is declared but never used in this snippet. */
static int npdrm_decrypt_rif_new__sceSblKeymgrSmCallfunc__hook(union keymgr_payload* payload) {
	/* it's SM request, thus we have the GPU address here, so we need to convert it to the CPU address */
	uint64_t buf_gpu_va = (uint64_t)payload->mapped_buf;
	union keymgr_request* request = (union keymgr_request*)sceSblDriverGpuVaToCpuVa(buf_gpu_va, NULL);
	union keymgr_response* response = (union keymgr_response*)request; /* request and response share the same buffer */
	struct ekc* eekc;
	int orig_ret, ret;

	/* try to decrypt rif normally */
	ret = orig_ret = sceSblKeymgrSmCallfunc(payload);

	/* and if it fails then we check if it's fake rif and try to decrypt it by ourselves */
	if ((ret != 0 || payload->status != 0) && request) {
		if (BE16(request->decrypt_entire_rif.rif.format) != 0x200) { /* not fake? */
			/* a real rif that genuinely failed — propagate the SM's result */
			ret = orig_ret;
			goto err;
		}

		/* NOTE(review): rif.format is passed as the first argument here —
		   verify against npdrm_decrypt_debug_rif's actual signature. */
		ret = npdrm_decrypt_debug_rif(request->decrypt_entire_rif.rif.format, request->decrypt_entire_rif.rif.digest);

		if (ret) {
			ret = orig_ret;
			goto err;
		}

		/* XXX: sorry, i'm lazy to refactor this crappy code :D basically, we're
		   copying decrypted data to proper place, consult with kernel code if
		   offsets needs to be changed */
		/* copy digest+data into the response, then zero the remainder */
		memcpy(response->decrypt_entire_rif.raw, request->decrypt_entire_rif.rif.digest, sizeof(request->decrypt_entire_rif.rif.digest) + sizeof(request->decrypt_entire_rif.rif.data));
		memset(response->decrypt_entire_rif.raw + sizeof(request->decrypt_entire_rif.rif.digest) + sizeof(request->decrypt_entire_rif.rif.data), 0, sizeof(response->decrypt_entire_rif.raw) - (sizeof(request->decrypt_entire_rif.rif.digest) + sizeof(request->decrypt_entire_rif.rif.data)));

		payload->status = ret;
		ret = 0;
	}

err:
	return ret;
}

//...

INSTALL_CALL_HOOK(0x62ECDE, npdrm_decrypt_rif_new__sceSblKeymgrSmCallfunc__hook);

//...

DECLARE_FUNCTION(0x62EB40, npdrm_decrypt_rif_new, int, struct rif_key_blob* key_blob, struct rif* rif); /* used to decrypt GD rifs (0x1A) */

//...

Added SceShellCore patches by Flatz for PKG Installation