diff --git a/sphaira/CMakeLists.txt b/sphaira/CMakeLists.txt index 82d191f..4f8a3ae 100644 --- a/sphaira/CMakeLists.txt +++ b/sphaira/CMakeLists.txt @@ -90,6 +90,10 @@ add_executable(sphaira source/minizip_helper.cpp source/fatfs.cpp + source/utils/devoptab_save.cpp + # todo: + # source/utils/devoptab_zip.cpp + source/usb/base.cpp source/usb/usbds.cpp source/usb/usbhs.cpp @@ -108,7 +112,9 @@ add_executable(sphaira source/yati/nx/nca.cpp source/yati/nx/ncm.cpp source/yati/nx/ns.cpp + source/yati/nx/nxdumptool_rsa.c + source/yati/nx/nxdumptool/save.c ) target_compile_definitions(sphaira PRIVATE @@ -397,6 +403,7 @@ target_include_directories(sphaira PRIVATE include ${minizip_inc} ${mbedtls_inc} + include/yati/nx/nxdumptool ) # copy the romfs diff --git a/sphaira/include/nro.hpp b/sphaira/include/nro.hpp index f5959e1..a5cf537 100644 --- a/sphaira/include/nro.hpp +++ b/sphaira/include/nro.hpp @@ -27,6 +27,9 @@ struct NroEntry { u64 icon_size{}; u64 icon_offset{}; + u64 romfs_size{}; + u64 romfs_offset{}; + FsTimeStampRaw timestamp{}; Hbini hbini{}; diff --git a/sphaira/include/ui/menus/homebrew.hpp b/sphaira/include/ui/menus/homebrew.hpp index 9c43e77..44f262e 100644 --- a/sphaira/include/ui/menus/homebrew.hpp +++ b/sphaira/include/ui/menus/homebrew.hpp @@ -71,6 +71,8 @@ private: return m_sort.Get() >= SortType_UpdatedStar; } + Result MountRomfsFs(); + private: static constexpr inline const char* INI_SECTION = "homebrew"; diff --git a/sphaira/include/utils/devoptab.hpp b/sphaira/include/utils/devoptab.hpp new file mode 100644 index 0000000..886dd6d --- /dev/null +++ b/sphaira/include/utils/devoptab.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include "fs.hpp" + +namespace sphaira::devoptab { + +// mounts to "lower_case_hex_id:/" +Result MountFromSavePath(u64 id, fs::FsPath& out_path); +void UnmountSave(u64 id); + +// todo: +void MountZip(fs::Fs* fs, const fs::FsPath& mount, fs::FsPath& out_path); +void UmountZip(const fs::FsPath& mount); + +// todo: +void 
MountNsp(fs::Fs* fs, const fs::FsPath& mount, fs::FsPath& out_path); +void UmountNsp(const fs::FsPath& mount); + +// todo: +void MountXci(fs::Fs* fs, const fs::FsPath& mount, fs::FsPath& out_path); +void UmountXci(const fs::FsPath& mount); + +} // namespace sphaira::devoptab diff --git a/sphaira/include/yati/nx/nxdumptool/core/nxdt_includes.h b/sphaira/include/yati/nx/nxdumptool/core/nxdt_includes.h new file mode 100644 index 0000000..a6e5bed --- /dev/null +++ b/sphaira/include/yati/nx/nxdumptool/core/nxdt_includes.h @@ -0,0 +1,63 @@ +/* + * nxdt_includes.h + * + * Copyright (c) 2020-2024, DarkMatterCore . + * + * This file is part of nxdumptool (https://github.com/DarkMatterCore/nxdumptool). + * + * nxdumptool is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * nxdumptool is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once + +#ifndef __NXDT_INCLUDES_H__ +#define __NXDT_INCLUDES_H__ + +/* C headers. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef __cplusplus +#include +#else +#include +#define _Atomic(X) std::atomic< X > +#endif + +/* libnx header. */ +#include + +/* Global defines. */ +#include "../defines.h" + +/* File/socket based logger. 
*/ +#include "nxdt_log.h" + +#endif /* __NXDT_INCLUDES_H__ */ diff --git a/sphaira/include/yati/nx/nxdumptool/core/nxdt_log.h b/sphaira/include/yati/nx/nxdumptool/core/nxdt_log.h new file mode 100644 index 0000000..9386483 --- /dev/null +++ b/sphaira/include/yati/nx/nxdumptool/core/nxdt_log.h @@ -0,0 +1,160 @@ +/* + * nxdt_log.h + * + * Copyright (c) 2020-2024, DarkMatterCore . + * + * This file is part of nxdumptool (https://github.com/DarkMatterCore/nxdumptool). + * + * nxdumptool is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * nxdumptool is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once + +#ifndef __NXDT_LOG_H__ +#define __NXDT_LOG_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/// Used to control logfile verbosity. +#define LOG_LEVEL_DEBUG 0 +#define LOG_LEVEL_INFO 1 +#define LOG_LEVEL_WARNING 2 +#define LOG_LEVEL_ERROR 3 +#define LOG_LEVEL_NONE 4 + +/// Defines the log level used throughout the application. +/// Log messages with a log value lower than this one won't be compiled into the binary. +/// If a value lower than LOG_LEVEL_DEBUG or equal to/greater than LOG_LEVEL_NONE is used, logfile output will be entirely disabled. +#define LOG_LEVEL LOG_LEVEL_NONE /* TODO: change before release (warning?). */ + +#if (LOG_LEVEL >= LOG_LEVEL_DEBUG) && (LOG_LEVEL < LOG_LEVEL_NONE) + +/// Helper macros. + +#define LOG_MSG_GENERIC(level, fmt, ...) 
logWriteFormattedStringToLogFile(level, __FILE__, __LINE__, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__) +#define LOG_MSG_BUF_GENERIC(dst, dst_size, level, fmt, ...) logWriteFormattedStringToBuffer(dst, dst_size, level, __FILE__, __LINE__, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__) +#define LOG_DATA_GENERIC(data, data_size, level, fmt, ...) logWriteBinaryDataToLogFile(data, data_size, level, __FILE__, __LINE__, __PRETTY_FUNCTION__, fmt, ##__VA_ARGS__) + +#if LOG_LEVEL == LOG_LEVEL_DEBUG +#define LOG_MSG_DEBUG(fmt, ...) LOG_MSG_GENERIC(LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) +#define LOG_MSG_BUF_DEBUG(dst, dst_size, fmt, ...) LOG_MSG_BUF_GENERIC(dst, dst_size, LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) +#define LOG_DATA_DEBUG(data, data_size, fmt, ...) LOG_DATA_GENERIC(data, data_size, LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) +#else +#define LOG_MSG_DEBUG(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_DEBUG(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_DEBUG(data, data_size, fmt, ...) do {} while(0) +#endif /* LOG_LEVEL == LOG_LEVEL_DEBUG */ + +#if LOG_LEVEL <= LOG_LEVEL_INFO +#define LOG_MSG_INFO(fmt, ...) LOG_MSG_GENERIC(LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) +#define LOG_MSG_BUF_INFO(dst, dst_size, fmt, ...) LOG_MSG_BUF_GENERIC(dst, dst_size, LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) +#define LOG_DATA_INFO(data, data_size, fmt, ...) LOG_DATA_GENERIC(data, data_size, LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) +#else +#define LOG_MSG_INFO(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_INFO(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_INFO(data, data_size, fmt, ...) do {} while(0) +#endif /* LOG_LEVEL <= LOG_LEVEL_INFO */ + +#if LOG_LEVEL <= LOG_LEVEL_WARNING +#define LOG_MSG_WARNING(fmt, ...) LOG_MSG_GENERIC(LOG_LEVEL_WARNING, fmt, ##__VA_ARGS__) +#define LOG_MSG_BUF_WARNING(dst, dst_size, fmt, ...) LOG_MSG_BUF_GENERIC(dst, dst_size, LOG_LEVEL_WARNING, fmt, ##__VA_ARGS__) +#define LOG_DATA_WARNING(data, data_size, fmt, ...) 
LOG_DATA_GENERIC(data, data_size, LOG_LEVEL_WARNING, fmt, ##__VA_ARGS__) +#else +#define LOG_MSG_WARNING(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_WARNING(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_WARNING(data, data_size, fmt, ...) do {} while(0) +#endif /* LOG_LEVEL <= LOG_LEVEL_WARNING */ + +#if LOG_LEVEL <= LOG_LEVEL_ERROR +#define LOG_MSG_ERROR(fmt, ...) LOG_MSG_GENERIC(LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) +#define LOG_MSG_BUF_ERROR(dst, dst_size, fmt, ...) LOG_MSG_BUF_GENERIC(dst, dst_size, LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) +#define LOG_DATA_ERROR(data, data_size, fmt, ...) LOG_DATA_GENERIC(data, data_size, LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) +#else +#define LOG_MSG_ERROR(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_ERROR(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_ERROR(data, data_size, fmt, ...) do {} while(0) +#endif /* LOG_LEVEL <= LOG_LEVEL_ERROR */ + +/// Writes the provided string to the logfile. +/// If the logfile hasn't been created and/or opened, this function takes care of it. +void logWriteStringToLogFile(const char *src); + +/// Writes a formatted log string to the logfile. +/// If the logfile hasn't been created and/or opened, this function takes care of it. +__attribute__((format(printf, 5, 6))) void logWriteFormattedStringToLogFile(u8 level, const char *file_name, int line, const char *func_name, const char *fmt, ...); + +/// Writes a formatted log string to the provided buffer. +/// If the buffer isn't big enough to hold both its current contents and the new formatted string, it will be resized. +__attribute__((format(printf, 7, 8))) void logWriteFormattedStringToBuffer(char **dst, size_t *dst_size, u8 level, const char *file_name, int line, const char *func_name, const char *fmt, ...); + +/// Writes a formatted log string + a hex string representation of the provided binary data to the logfile. +/// If the logfile hasn't been created and/or opened, this function takes care of it. 
+__attribute__((format(printf, 7, 8))) void logWriteBinaryDataToLogFile(const void *data, size_t data_size, u8 level, const char *file_name, int line, const char *func_name, const char *fmt, ...); + +/// Forces a flush operation on the logfile. +void logFlushLogFile(void); + +/// Write any pending data to the logfile, flushes it and then closes it. +void logCloseLogFile(void); + +/// Returns a pointer to a dynamically allocated buffer that holds the last error message string, or NULL if there's none. +/// The allocated buffer must be freed by the caller using free(). +char *logGetLastMessage(void); + +/// (Un)locks the log mutex. Can be used to block other threads and prevent them from writing data to the logfile. +/// Use with caution. +void logControlMutex(bool lock); + +#else /* (LOG_LEVEL >= LOG_LEVEL_DEBUG) && (LOG_LEVEL < LOG_LEVEL_NONE) */ + +/// Helper macros. + +#define LOG_MSG_GENERIC(level, fmt, ...) do {} while(0) +#define LOG_MSG_BUF_GENERIC(dst, dst_size, level, fmt, ...) do {} while(0) +#define LOG_DATA_GENERIC(data, data_size, level, fmt, ...) do {} while(0) + +#define LOG_MSG_DEBUG(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_DEBUG(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_DEBUG(data, data_size, fmt, ...) do {} while(0) + +#define LOG_MSG_INFO(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_INFO(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_INFO(data, data_size, fmt, ...) do {} while(0) + +#define LOG_MSG_WARNING(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_WARNING(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_WARNING(data, data_size, fmt, ...) do {} while(0) + +#define LOG_MSG_ERROR(fmt, ...) do {} while(0) +#define LOG_MSG_BUF_ERROR(dst, dst_size, fmt, ...) do {} while(0) +#define LOG_DATA_ERROR(data, data_size, fmt, ...) do {} while(0) + +#define logWriteStringToLogFile(...) do {} while(0) +#define logWriteFormattedStringToLogFile(...) do {} while(0) +#define logWriteFormattedStringToBuffer(...) 
do {} while(0) +#define logWriteBinaryDataToLogFile(...) do {} while(0) +#define logFlushLogFile(...) do {} while(0) +#define logCloseLogFile(...) do {} while(0) +#define logGetLastMessage(...) NULL +#define logControlMutex(...) do {} while(0) + +#endif /* (LOG_LEVEL >= LOG_LEVEL_DEBUG) && (LOG_LEVEL < LOG_LEVEL_NONE) */ + +#ifdef __cplusplus +} +#endif + +#endif /* __NXDT_LOG_H__ */ diff --git a/sphaira/include/yati/nx/nxdumptool/core/save.h b/sphaira/include/yati/nx/nxdumptool/core/save.h new file mode 100644 index 0000000..81bf24d --- /dev/null +++ b/sphaira/include/yati/nx/nxdumptool/core/save.h @@ -0,0 +1,560 @@ +/* + * save.h + * + * Copyright (c) 2019-2020, shchmue. + * Copyright (c) 2020-2024, DarkMatterCore . + * + * This file is part of nxdumptool (https://github.com/DarkMatterCore/nxdumptool). + * + * nxdumptool is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * nxdumptool is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once + +#ifndef __SAVE_H__ +#define __SAVE_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define IVFC_MAX_LEVEL 6 + +#define SAVE_HEADER_SIZE 0x4000 +#define SAVE_FAT_ENTRY_SIZE 8 +#define SAVE_FS_LIST_MAX_NAME_LENGTH 0x40 +#define SAVE_FS_LIST_ENTRY_SIZE 0x60 + +#define MAGIC_DISF 0x46534944 +#define MAGIC_DPFS 0x53465044 +#define MAGIC_JNGL 0x4C474E4A +#define MAGIC_SAVE 0x45564153 +#define MAGIC_RMAP 0x50414D52 +#define MAGIC_IVFC 0x43465649 + +#define ACTION_VERIFY (1 << 2) + +typedef enum { + VALIDITY_UNCHECKED = 0, + VALIDITY_INVALID, + VALIDITY_VALID +} validity_t; + +typedef struct save_ctx_t save_ctx_t; + +typedef struct { + u32 magic; /* "DISF". */ + u32 version; + u8 hash[0x20]; + u64 file_map_entry_offset; + u64 file_map_entry_size; + u64 meta_map_entry_offset; + u64 meta_map_entry_size; + u64 file_map_data_offset; + u64 file_map_data_size; + u64 duplex_l1_offset_a; + u64 duplex_l1_offset_b; + u64 duplex_l1_size; + u64 duplex_data_offset_a; + u64 duplex_data_offset_b; + u64 duplex_data_size; + u64 journal_data_offset; + u64 journal_data_size_a; + u64 journal_data_size_b; + u64 journal_size; + u64 duplex_master_offset_a; + u64 duplex_master_offset_b; + u64 duplex_master_size; + u64 ivfc_master_hash_offset_a; + u64 ivfc_master_hash_offset_b; + u64 ivfc_master_hash_size; + u64 journal_map_table_offset; + u64 journal_map_table_size; + u64 journal_physical_bitmap_offset; + u64 journal_physical_bitmap_size; + u64 journal_virtual_bitmap_offset; + u64 journal_virtual_bitmap_size; + u64 journal_free_bitmap_offset; + u64 journal_free_bitmap_size; + u64 ivfc_l1_offset; + u64 ivfc_l1_size; + u64 ivfc_l2_offset; + u64 ivfc_l2_size; + u64 ivfc_l3_offset; + u64 ivfc_l3_size; + u64 fat_offset; + u64 fat_size; + u64 duplex_index; + u64 fat_ivfc_master_hash_a; + u64 fat_ivfc_master_hash_b; + u64 fat_ivfc_l1_offset; + u64 fat_ivfc_l1_size; + u64 fat_ivfc_l2_offset; + u64 fat_ivfc_l2_size; + u8 _0x190[0x70]; +} fs_layout_t; + 
+NXDT_ASSERT(fs_layout_t, 0x200); + +#pragma pack(push, 1) +typedef struct { + u64 offset; + u64 length; + u32 block_size_power; +} duplex_info_t; +#pragma pack(pop) + +NXDT_ASSERT(duplex_info_t, 0x14); + +typedef struct { + u32 magic; /* "DPFS". */ + u32 version; + duplex_info_t layers[3]; +} duplex_header_t; + +NXDT_ASSERT(duplex_header_t, 0x44); + +typedef struct { + u32 version; + u32 main_data_block_count; + u32 journal_block_count; + u32 _0x0C; +} journal_map_header_t; + +NXDT_ASSERT(journal_map_header_t, 0x10); + +typedef struct { + u32 magic; /* "JNGL". */ + u32 version; + u64 total_size; + u64 journal_size; + u64 block_size; +} journal_header_t; + +NXDT_ASSERT(journal_header_t, 0x20); + +typedef struct { + u32 magic; /* "SAVE". */ + u32 version; + u64 block_count; + u64 block_size; +} save_fs_header_t; + +NXDT_ASSERT(save_fs_header_t, 0x18); + +typedef struct { + u64 block_size; + u64 allocation_table_offset; + u32 allocation_table_block_count; + u32 _0x14; + u64 data_offset; + u32 data_block_count; + u32 _0x24; + u32 directory_table_block; + u32 file_table_block; +} fat_header_t; + +NXDT_ASSERT(fat_header_t, 0x30); + +typedef struct { + u32 magic; /* "RMAP". 
*/ + u32 version; + u32 map_entry_count; + u32 map_segment_count; + u32 segment_bits; + u8 _0x14[0x2C]; +} remap_header_t; + +NXDT_ASSERT(remap_header_t, 0x40); + +typedef struct remap_segment_ctx_t remap_segment_ctx_t; +typedef struct remap_entry_ctx_t remap_entry_ctx_t; + +#pragma pack(push, 1) +struct remap_entry_ctx_t { + u64 virtual_offset; + u64 physical_offset; + u64 size; + u32 alignment; + u32 _0x1C; + u64 virtual_offset_end; + u64 physical_offset_end; + remap_segment_ctx_t *segment; + remap_entry_ctx_t *next; +}; +#pragma pack(pop) + +struct remap_segment_ctx_t{ + u64 offset; + u64 length; + remap_entry_ctx_t **entries; + u64 entry_count; +}; + +typedef struct { + u8 *data; + u8 *bitmap; +} duplex_bitmap_t; + +typedef struct { + u32 block_size; + u8 *bitmap_storage; + u8 *data_a; + u8 *data_b; + duplex_bitmap_t bitmap; + u64 _length; +} duplex_storage_ctx_t; + +enum base_storage_type { + STORAGE_BYTES = 0, + STORAGE_DUPLEX = 1, + STORAGE_REMAP = 2, + STORAGE_JOURNAL = 3 +}; + +typedef struct { + remap_header_t *header; + remap_entry_ctx_t *map_entries; + remap_segment_ctx_t *segments; + enum base_storage_type type; + u64 base_storage_offset; + duplex_storage_ctx_t *duplex; + FILE *file; +} remap_storage_ctx_t; + +typedef struct { + u64 title_id; + u8 user_id[0x10]; + u64 save_id; + u8 save_data_type; + u8 _0x21[0x1F]; + u64 save_owner_id; + u64 timestamp; + u64 _0x50; + u64 data_size; + u64 journal_size; + u64 commit_id; +} extra_data_t; + +NXDT_ASSERT(extra_data_t, 0x70); + +typedef struct { + u64 logical_offset; + u64 hash_data_size; + u32 block_size; + u32 reserved; +} ivfc_level_hdr_t; + +NXDT_ASSERT(ivfc_level_hdr_t, 0x18); + +typedef struct { + u32 magic; + u32 id; + u32 master_hash_size; + u32 num_levels; + ivfc_level_hdr_t level_headers[IVFC_MAX_LEVEL]; + u8 salt_source[0x20]; +} ivfc_save_hdr_t; + +NXDT_ASSERT(ivfc_save_hdr_t, 0xC0); + +#pragma pack(push, 1) +typedef struct { + u8 cmac[0x10]; + u8 _0x10[0xF0]; + fs_layout_t layout; + 
duplex_header_t duplex_header; + ivfc_save_hdr_t data_ivfc_header; + u32 _0x404; + journal_header_t journal_header; + journal_map_header_t map_header; + u8 _0x438[0x1D0]; + save_fs_header_t save_header; + fat_header_t fat_header; + remap_header_t main_remap_header, meta_remap_header; + u64 _0x6D0; + extra_data_t extra_data; + u8 _0x748[0x390]; + ivfc_save_hdr_t fat_ivfc_header; + u8 _0xB98[0x3468]; +} save_header_t; +#pragma pack(pop) + +NXDT_ASSERT(save_header_t, 0x4000); + +typedef struct { + duplex_storage_ctx_t layers[2]; + duplex_storage_ctx_t data_layer; + u64 _length; +} hierarchical_duplex_storage_ctx_t; + +typedef struct { + u8 *data_a; + u8 *data_b; + duplex_info_t info; +} duplex_fs_layer_info_t; + +typedef struct { + u8 *map_storage; + u8 *physical_block_bitmap; + u8 *virtual_block_bitmap; + u8 *free_block_bitmap; +} journal_map_params_t; + +typedef struct { + u32 physical_index; + u32 virtual_index; +} journal_map_entry_t; + +NXDT_ASSERT(journal_map_entry_t, 0x8); + +typedef struct { + journal_map_header_t *header; + journal_map_entry_t *entries; + u8 *map_storage; +} journal_map_ctx_t; + +typedef struct { + journal_map_ctx_t map; + journal_header_t *header; + u32 block_size; + u64 journal_data_offset; + u64 _length; + FILE *file; +} journal_storage_ctx_t; + +typedef struct { + u64 data_offset; + u64 data_size; + u64 hash_offset; + u32 hash_block_size; + validity_t hash_validity; + enum base_storage_type type; + save_ctx_t *save_ctx; +} ivfc_level_save_ctx_t; + +typedef struct { + ivfc_level_save_ctx_t *data; + u32 block_size; + u8 salt[0x20]; +} integrity_verification_info_ctx_t; + +typedef struct integrity_verification_storage_ctx_t integrity_verification_storage_ctx_t; + +struct integrity_verification_storage_ctx_t { + ivfc_level_save_ctx_t *hash_storage; + ivfc_level_save_ctx_t *base_storage; + validity_t *block_validities; + u8 salt[0x20]; + u32 sector_size; + u32 sector_count; + u64 _length; + integrity_verification_storage_ctx_t *next_level; +}; 
+ +typedef struct { + ivfc_level_save_ctx_t levels[5]; + ivfc_level_save_ctx_t *data_level; + validity_t **level_validities; + u64 _length; + integrity_verification_storage_ctx_t integrity_storages[4]; +} hierarchical_integrity_verification_storage_ctx_t; + +typedef struct { + u32 prev; + u32 next; +} allocation_table_entry_t; + +typedef struct { + u32 free_list_entry_index; + void *base_storage; + fat_header_t *header; +} allocation_table_ctx_t; + +typedef struct { + hierarchical_integrity_verification_storage_ctx_t *base_storage; + u32 block_size; + u32 initial_block; + allocation_table_ctx_t *fat; + u64 _length; +} allocation_table_storage_ctx_t; + +typedef struct { + allocation_table_ctx_t *fat; + u32 virtual_block; + u32 physical_block; + u32 current_segment_size; + u32 next_block; + u32 prev_block; +} allocation_table_iterator_ctx_t; + +typedef struct { + char name[SAVE_FS_LIST_MAX_NAME_LENGTH]; + u32 parent; +} save_entry_key_t; + +#pragma pack(push, 1) +typedef struct { + u32 start_block; + u64 length; + u32 _0xC[2]; +} save_file_info_t; +#pragma pack(pop) + +NXDT_ASSERT(save_file_info_t, 0x14); + +#pragma pack(push, 1) +typedef struct { + u32 next_directory; + u32 next_file; + u32 _0x8[3]; +} save_find_position_t; +#pragma pack(pop) + +NXDT_ASSERT(save_find_position_t, 0x14); + +#pragma pack(push, 1) +typedef struct { + u32 next_sibling; + union { /* Save table entry type. Size = 0x14. 
*/ + save_file_info_t save_file_info; + save_find_position_t save_find_position; + }; +} save_table_entry_t; +#pragma pack(pop) + +NXDT_ASSERT(save_table_entry_t, 0x18); + +#pragma pack(push, 1) +typedef struct { + u32 parent; + char name[SAVE_FS_LIST_MAX_NAME_LENGTH]; + save_table_entry_t value; + u32 next; +} save_fs_list_entry_t; +#pragma pack(pop) + +NXDT_ASSERT(save_fs_list_entry_t, 0x60); + +typedef struct { + u32 free_list_head_index; + u32 used_list_head_index; + allocation_table_storage_ctx_t storage; + u32 capacity; +} save_filesystem_list_ctx_t; + +typedef struct { + save_filesystem_list_ctx_t file_table; + save_filesystem_list_ctx_t directory_table; +} hierarchical_save_file_table_ctx_t; + +typedef struct { + hierarchical_integrity_verification_storage_ctx_t *base_storage; + allocation_table_ctx_t allocation_table; + save_fs_header_t *header; + hierarchical_save_file_table_ctx_t file_table; +} save_filesystem_ctx_t; + +struct save_ctx_t { + save_header_t header; + FILE *file; + struct { + FILE *file; + u32 action; + } tool_ctx; + validity_t header_cmac_validity; + validity_t header_hash_validity; + u8 *data_ivfc_master; + u8 *fat_ivfc_master; + remap_storage_ctx_t data_remap_storage; + remap_storage_ctx_t meta_remap_storage; + duplex_fs_layer_info_t duplex_layers[3]; + hierarchical_duplex_storage_ctx_t duplex_storage; + journal_storage_ctx_t journal_storage; + journal_map_params_t journal_map_info; + hierarchical_integrity_verification_storage_ctx_t core_data_ivfc_storage; + hierarchical_integrity_verification_storage_ctx_t fat_ivfc_storage; + u8 *fat_storage; + save_filesystem_ctx_t save_filesystem_core; + u8 save_mac_key[0x10]; +}; + +static inline u32 allocation_table_entry_index_to_block(u32 entry_index) +{ + return (entry_index - 1); +} + +static inline u32 allocation_table_block_to_entry_index(u32 block_index) +{ + return (block_index + 1); +} + +static inline int allocation_table_is_list_end(allocation_table_entry_t *entry) +{ + return 
((entry->next & 0x7FFFFFFF) == 0); +} + +static inline int allocation_table_is_list_start(allocation_table_entry_t *entry) +{ + return (entry->prev == 0x80000000); +} + +static inline int allocation_table_get_next(allocation_table_entry_t *entry) +{ + return (entry->next & 0x7FFFFFFF); +} + +static inline int allocation_table_get_prev(allocation_table_entry_t *entry) +{ + return (entry->prev & 0x7FFFFFFF); +} + +static inline allocation_table_entry_t *save_allocation_table_read_entry(allocation_table_ctx_t *ctx, u32 entry_index) +{ + return ((allocation_table_entry_t*)((u8*)ctx->base_storage + (entry_index * SAVE_FAT_ENTRY_SIZE))); +} + +static inline u32 save_allocation_table_get_free_list_entry_index(allocation_table_ctx_t *ctx) +{ + return allocation_table_get_next(save_allocation_table_read_entry(ctx, ctx->free_list_entry_index)); +} + +static inline u32 save_allocation_table_get_free_list_block_index(allocation_table_ctx_t *ctx) +{ + return allocation_table_entry_index_to_block(save_allocation_table_get_free_list_entry_index(ctx)); +} + +bool save_process(save_ctx_t *ctx); +bool save_process_header(save_ctx_t *ctx); +void save_free_contexts(save_ctx_t *ctx); + +bool save_open_fat_storage(save_filesystem_ctx_t *ctx, allocation_table_storage_ctx_t *storage_ctx, u32 block_index); +u32 save_allocation_table_storage_read(allocation_table_storage_ctx_t *ctx, void *buffer, u64 offset, size_t count); +bool save_fs_list_get_value(save_filesystem_list_ctx_t *ctx, u32 index, save_fs_list_entry_t *value); +u32 save_fs_list_get_index_from_key(save_filesystem_list_ctx_t *ctx, save_entry_key_t *key, u32 *prev_index); +bool save_hierarchical_file_table_find_path_recursive(hierarchical_save_file_table_ctx_t *ctx, save_entry_key_t *key, const char *path); +bool save_hierarchical_file_table_get_file_entry_by_path(hierarchical_save_file_table_ctx_t *ctx, const char *path, save_fs_list_entry_t *entry); +bool 
save_hierarchical_directory_table_get_file_entry_by_path(hierarchical_save_file_table_ctx_t *ctx, const char *path, save_fs_list_entry_t *entry); + +save_ctx_t *save_open_savefile(const char *path, u32 action); +void save_close_savefile(save_ctx_t **ctx); +bool save_get_fat_storage_from_file_entry_by_path(save_ctx_t *ctx, const char *path, allocation_table_storage_ctx_t *out_fat_storage, u64 *out_file_entry_size); + +#ifdef __cplusplus +} +#endif + +#endif /* __SAVE_H__ */ diff --git a/sphaira/include/yati/nx/nxdumptool/defines.h b/sphaira/include/yati/nx/nxdumptool/defines.h new file mode 100644 index 0000000..1869192 --- /dev/null +++ b/sphaira/include/yati/nx/nxdumptool/defines.h @@ -0,0 +1,61 @@ +/* + * defines.h + * + * Copyright (c) 2020-2024, DarkMatterCore . + * + * This file is part of nxdumptool (https://github.com/DarkMatterCore/nxdumptool). + * + * nxdumptool is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * nxdumptool is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once + +#ifndef __DEFINES_H__ +#define __DEFINES_H__ + +/* Broadly useful language defines. 
*/ + +#define MEMBER_SIZE(type, member) sizeof(((type*)NULL)->member) + +#define MAX_ELEMENTS(x) ((sizeof((x))) / (sizeof((x)[0]))) + +#define ALIGN_UP(x, y) (((x) + ((y) - 1)) & ~((y) - 1)) +#define ALIGN_DOWN(x, y) ((x) & ~((y) - 1)) +#define IS_ALIGNED(x, y) (((x) & ((y) - 1)) == 0) + +#define IS_POWER_OF_TWO(x) ((x) > 0 && ((x) & ((x) - 1)) == 0) + +#define DIVIDE_UP(x, y) (((x) + ((y) - 1)) / (y)) + +#define CONCATENATE_IMPL(s1, s2) s1##s2 +#define CONCATENATE(s1, s2) CONCATENATE_IMPL(s1, s2) + +#define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __COUNTER__) + +#define NON_COPYABLE(cls) \ + cls(const cls&) = delete; \ + cls& operator=(const cls&) = delete + +#define NON_MOVEABLE(cls) \ + cls(cls&&) = delete; \ + cls& operator=(cls&&) = delete + +#define ALWAYS_INLINE inline __attribute__((always_inline)) +#define ALWAYS_INLINE_LAMBDA __attribute__((always_inline)) + +#define CLEANUP(func) __attribute__((__cleanup__(func))) + +#define NXDT_ASSERT(name, size) static_assert(sizeof(name) == (size), "Bad size for " #name "! Expected " #size ".") + +#endif /* __DEFINES_H__ */ diff --git a/sphaira/source/fatfs.cpp b/sphaira/source/fatfs.cpp index ac70c9d..e3c5bc2 100644 --- a/sphaira/source/fatfs.cpp +++ b/sphaira/source/fatfs.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -14,14 +15,99 @@ namespace sphaira::fatfs { namespace { -// 256-512 are the best values, anything more has serious slow down -// due to non-seq reads. +// todo: replace with off+size and have the data be in another struct +// in order to be more lcache efficient. 
struct BufferedFileData { - u8 data[1024 * 256]; - s64 off; - s64 size; + u8* data{}; + u64 off{}; + u64 size{}; + + ~BufferedFileData() { + if (data) { + free(data); + } + } + + void Allocate(u64 new_size) { + data = (u8*)realloc(data, new_size * sizeof(*data)); + off = 0; + size = 0; + } }; +template +struct LinkedList { + T* data; + LinkedList* next; + LinkedList* prev; +}; + +constexpr u64 CACHE_LARGE_ALLOC_SIZE = 1024 * 512; +constexpr u64 CACHE_LARGE_SIZE = 1024 * 16; + +template +struct Lru { + using ListEntry = LinkedList; + + // pass span of the data. + void Init(std::span data) { + list_flat_array.clear(); + list_flat_array.resize(data.size()); + + auto list_entry = list_head = list_flat_array.data(); + + for (size_t i = 0; i < data.size(); i++) { + list_entry = list_flat_array.data() + i; + list_entry->data = data.data() + i; + + if (i + 1 < data.size()) { + list_entry->next = &list_flat_array[i + 1]; + } + if (i) { + list_entry->prev = &list_flat_array[i - 1]; + } + } + + list_tail = list_entry->prev->next; + } + + // moves entry to the front of the list. + void Update(ListEntry* entry) { + // only update position if we are not the head. + if (list_head != entry) { + entry->prev->next = entry->next; + if (entry->next) { + entry->next->prev = entry->prev; + } else { + list_tail = entry->prev; + } + + // update head. + auto head_temp = list_head; + list_head = entry; + list_head->prev = nullptr; + list_head->next = head_temp; + head_temp->prev = list_head; + } + } + + // moves last entry (tail) to the front of the list. 
+ auto GetNextFree() { + Update(list_tail); + return list_head->data; + } + + auto begin() const { return list_head; } + auto end() const { return list_tail; } + +private: + ListEntry* list_head{}; + ListEntry* list_tail{}; + std::vector list_flat_array{}; +}; + +using LruBufferedData = Lru; + enum BisMountType { BisMountType_PRODINFOF, BisMountType_SAFE, @@ -31,7 +117,10 @@ enum BisMountType { struct FatStorageEntry { FsStorage storage; - BufferedFileData buffered; + s64 storage_size; + LruBufferedData lru_cache[2]; + BufferedFileData buffered_small[1024]; // 1MiB (usually). + BufferedFileData buffered_large[2]; // 1MiB FATFS fs; devoptab_t devoptab; }; @@ -52,35 +141,57 @@ static_assert(std::size(BIS_MOUNT_ENTRIES) == FF_VOLUMES); FatStorageEntry g_fat_storage[FF_VOLUMES]; -// crappy generic buffered io i wrote a while ago. -// this allows for 3-4x speed increase reading from storage. -// as it avoids reading very small chunks at a time. -// note: this works best when the file is not fragmented. -Result ReadFile(FsStorage* storage, BufferedFileData& m_buffered, void *_buffer, size_t file_off, size_t read_size) { +Result ReadStorage(FsStorage* storage, std::span lru_cache, void *_buffer, u64 file_off, u64 read_size, u64 capacity) { + // log_write("[FATFS] read offset: %zu size: %zu\n", file_off, read_size); auto dst = static_cast(_buffer); size_t amount = 0; - // check if we already have this data buffered. - if (m_buffered.size) { - // check if we can read this data into the beginning of dst. 
- if (file_off < m_buffered.off + m_buffered.size && file_off >= m_buffered.off) { - const auto off = file_off - m_buffered.off; - const auto size = std::min(read_size, m_buffered.size - off); - std::memcpy(dst, m_buffered.data + off, size); + R_UNLESS(file_off < capacity, FsError_UnsupportedOperateRangeForFileStorage); + read_size = std::min(read_size, capacity - file_off); - read_size -= size; - file_off += size; - amount += size; - dst += size; + // fatfs reads in max 16k chunks. + // knowing this, it's possible to detect large file reads by simply checking if + // the read size is 16k (or more, maybe in the future). + // however this would destroy random access performance, such as fetching 512 bytes. + // the fix was to have 2 LRU caches, one for large data and the other for small (anything below 16k). + // this results in file reads 32MB -> 184MB and directory listing is instant. + const auto large_read = read_size >= 1024 * 16; + auto& lru = large_read ? lru_cache[1] : lru_cache[0]; + + for (auto list = lru.begin(); list; list = list->next) { + const auto& m_buffered = list->data; + if (m_buffered->size) { + // check if we can read this data into the beginning of dst. + if (file_off < m_buffered->off + m_buffered->size && file_off >= m_buffered->off) { + const auto off = file_off - m_buffered->off; + const auto size = std::min(read_size, m_buffered->size - off); + if (size) { + // log_write("[FAT] cache HIT at: %zu\n", file_off); + std::memcpy(dst, m_buffered->data + off, size); + + read_size -= size; + file_off += size; + amount += size; + dst += size; + + lru.Update(list); + break; + } + } } } if (read_size) { - m_buffered.off = 0; - m_buffered.size = 0; + // log_write("[FAT] cache miss at: %zu %zu\n", file_off, read_size); - // if the dst dst is big enough, read data in place. - if (read_size >= sizeof(m_buffered.data)) { + auto alloc_size = large_read ?
CACHE_LARGE_ALLOC_SIZE : std::max(read_size, 512); + alloc_size = std::min(alloc_size, capacity - file_off); + + auto m_buffered = lru.GetNextFree(); + m_buffered->Allocate(alloc_size); + + // if the dst is big enough, read data in place. + if (read_size > alloc_size) { if (R_SUCCEEDED(fsStorageRead(storage, file_off, dst, read_size))) { const auto bytes_read = read_size; read_size -= bytes_read; @@ -89,18 +200,18 @@ Result ReadFile(FsStorage* storage, BufferedFileData& m_buffered, void *_buffer, dst += bytes_read; // save the last chunk of data to the m_buffered io. - const auto max_advance = std::min(amount, sizeof(m_buffered.data)); - m_buffered.off = file_off - max_advance; - m_buffered.size = max_advance; - std::memcpy(m_buffered.data, dst - max_advance, max_advance); + const auto max_advance = std::min(amount, alloc_size); + m_buffered->off = file_off - max_advance; + m_buffered->size = max_advance; + std::memcpy(m_buffered->data, dst - max_advance, max_advance); } - } else if (R_SUCCEEDED(fsStorageRead(storage, file_off, m_buffered.data, sizeof(m_buffered.data)))) { - const auto bytes_read = sizeof(m_buffered.data); - const auto max_advance = std::min(read_size, bytes_read); - std::memcpy(dst, m_buffered.data, max_advance); + } else if (R_SUCCEEDED(fsStorageRead(storage, file_off, m_buffered->data, alloc_size))) { + const auto bytes_read = alloc_size; + const auto max_advance = std::min(read_size, bytes_read); + std::memcpy(dst, m_buffered->data, max_advance); - m_buffered.off = file_off; - m_buffered.size = bytes_read; + m_buffered->off = file_off; + m_buffered->size = bytes_read; read_size -= max_advance; file_off += max_advance; @@ -109,7 +220,7 @@ Result ReadFile(FsStorage* storage, BufferedFileData& m_buffered, void *_buffer, } } - return 0; + R_SUCCEED(); } void fill_stat(const FILINFO* fno, struct stat *st) { @@ -199,7 +310,6 @@ DIR_ITER* fat_diropen(struct _reent *r, DIR_ITER *dirState, const char *path) { int fat_dirreset(struct _reent *r, DIR_ITER 
*dirState) { if (FR_OK != f_rewinddir((FDIR*)dirState->dirStruct)) { - log_write("[FAT] fat_dirreset failed\n"); return set_errno(r, ENOENT); } return r->_errno = 0; @@ -277,11 +387,15 @@ Result MountAll() { log_write("[FAT] %s\n", bis.volume_name); + fat.lru_cache[0].Init(fat.buffered_small); + fat.lru_cache[1].Init(fat.buffered_large); + fat.devoptab = DEVOPTAB; fat.devoptab.name = bis.volume_name; fat.devoptab.deviceData = &fat; R_TRY(fsOpenBisStorage(&fat.storage, bis.id)); + R_TRY(fsStorageGetSize(&fat.storage, &fat.storage_size)); log_write("[FAT] BIS SUCCESS %s\n", bis.volume_name); R_UNLESS(FR_OK == f_mount(&fat.fs, bis.mount_name, 1), 0x1); @@ -319,7 +433,7 @@ const char* VolumeStr[] { Result fatfs_read(u8 num, void* dst, u64 offset, u64 size) { // log_write("[FAT] num: %u\n", num); auto& fat = sphaira::fatfs::g_fat_storage[num]; - return sphaira::fatfs::ReadFile(&fat.storage, fat.buffered, dst, offset, size); + return sphaira::fatfs::ReadStorage(&fat.storage, fat.lru_cache, dst, offset, size, fat.storage_size); } } // extern "C" diff --git a/sphaira/source/nro.cpp b/sphaira/source/nro.cpp index f0fb047..e2dffd8 100644 --- a/sphaira/source/nro.cpp +++ b/sphaira/source/nro.cpp @@ -60,7 +60,7 @@ auto nro_parse_internal(fs::Fs* fs, const fs::FsPath& path, NroEntry& entry) -> std::strcpy(nacp.lang.author, "Unknown"); std::strcpy(nacp.display_version, "Unknown"); - entry.icon_offset = entry.icon_size = 0; + entry.romfs_offset = entry.romfs_size = entry.icon_offset = entry.icon_size = 0; entry.is_nacp_valid = false; } else { entry.size += sizeof(asset) + asset.icon.size + asset.nacp.size + asset.romfs.size; @@ -70,6 +70,8 @@ auto nro_parse_internal(fs::Fs* fs, const fs::FsPath& path, NroEntry& entry) -> // lazy load the icons entry.icon_size = asset.icon.size; entry.icon_offset = data.header.size + asset.icon.offset; + entry.romfs_offset = data.header.size + asset.romfs.offset; + entry.romfs_size = asset.romfs.size; entry.is_nacp_valid = true; } diff --git 
a/sphaira/source/ui/menus/game_nca_menu.cpp b/sphaira/source/ui/menus/game_nca_menu.cpp index 20b776d..a3da765 100644 --- a/sphaira/source/ui/menus/game_nca_menu.cpp +++ b/sphaira/source/ui/menus/game_nca_menu.cpp @@ -171,7 +171,9 @@ Menu::Menu(Entry& entry, const meta::MetaEntry& meta_entry) }}), std::make_pair(Button::A, Action{"Mount Fs"_i18n, [this](){ // todo: handle error here. - MountNcaFs(); + if (!m_entries.empty() && !GetEntry().missing) { + MountNcaFs(); + } }}), std::make_pair(Button::B, Action{"Back"_i18n, [this](){ SetPop(); diff --git a/sphaira/source/ui/menus/homebrew.cpp b/sphaira/source/ui/menus/homebrew.cpp index 1ec2b86..916c50c 100644 --- a/sphaira/source/ui/menus/homebrew.cpp +++ b/sphaira/source/ui/menus/homebrew.cpp @@ -2,6 +2,7 @@ #include "log.hpp" #include "fs.hpp" #include "ui/menus/homebrew.hpp" +#include "ui/menus/filebrowser.hpp" #include "ui/sidebar.hpp" #include "ui/error_box.hpp" #include "ui/option_box.hpp" @@ -469,7 +470,16 @@ void Menu::DisplayOptions() { ScanHomebrew(); App::PopToMenu(); }, "Hides the selected homebrew.\n\n" - "To Unhide homebrew, enable \"Show hidden\" in the sort options."_i18n); + "To unhide homebrew, enable \"Show hidden\" in the sort options."_i18n); + + auto mount_option = options->Add("Mount RomFS"_i18n, [this](){ + const auto rc = MountRomfsFs(); + App::PushErrorBox(rc, "Failed to mount NRO RomFS"_i18n); + }, "Mounts the homebrew RomFS"_i18n); + + mount_option->Depends([this](){ + return GetEntry().romfs_offset && GetEntry().romfs_size; + }, "This homebrew does not have a RomFS"_i18n); options->Add("Delete"_i18n, [this](){ const auto buf = "Are you sure you want to delete "_i18n + GetEntry().path.toString() + "?"; @@ -500,4 +510,47 @@ void Menu::DisplayOptions() { } } +struct NroRomFS final : fs::FsStdio { + NroRomFS(const fs::FsPath& name, const fs::FsPath& root) : FsStdio{true, root}, m_name{name} { + + } + + ~NroRomFS() { + romfsUnmount(m_name); + } + + const fs::FsPath m_name; +}; + +Result 
Menu::MountRomfsFs() { + const char* name = "nro_romfs"; + const char* root = "nro_romfs:/"; + const auto& e = GetEntry(); + + // todo: add errors for when nro doesn't have romfs. + R_UNLESS(e.romfs_offset, 0x1); + R_UNLESS(e.romfs_size, 0x1); + + FsFile file; + R_TRY(fsFsOpenFile(fsdevGetDeviceFileSystem("sdmc"), e.path, FsOpenMode_Read, &file)); + + const auto rc = romfsMountFromFile(file, e.romfs_offset, name); + if (R_FAILED(rc)) { + fsFileClose(&file); + R_THROW(rc); + } + + auto fs = std::make_shared(name, root); + + const filebrowser::FsEntry fs_entry{ + .name = e.GetName(), + .root = root, + .type = filebrowser::FsType::Custom, + .flags = filebrowser::FsEntryFlag_ReadOnly, + }; + + App::Push(fs, fs_entry, root); + R_SUCCEED(); +} + } // namespace sphaira::ui::menu::homebrew diff --git a/sphaira/source/ui/menus/save_menu.cpp b/sphaira/source/ui/menus/save_menu.cpp index 92b4993..4659a5d 100644 --- a/sphaira/source/ui/menus/save_menu.cpp +++ b/sphaira/source/ui/menus/save_menu.cpp @@ -10,6 +10,8 @@ #include "minizip_helper.hpp" #include "dumper.hpp" +#include "utils/devoptab.hpp" + #include "ui/menus/save_menu.hpp" #include "ui/menus/filebrowser.hpp" @@ -40,6 +42,18 @@ constexpr const char* NX_SAVE_META_NAME = ".nx_save_meta.bin"; constinit UEvent g_change_uevent; +struct SystemSaveFs final : fs::FsStdio { + SystemSaveFs(u64 id, const fs::FsPath& root) : FsStdio{true, root}, m_id{id} { + + } + + ~SystemSaveFs() { + devoptab::UnmountSave(m_id); + } + + const u64 m_id; +}; + // https://github.com/J-D-K/JKSV/issues/264#issuecomment-2618962807 struct NXSaveMeta { u32 magic{}; // NX_SAVE_META_MAGIC @@ -325,6 +339,12 @@ Menu::Menu(u32 flags) : grid::Menu{"Saves"_i18n, flags} { } } }}), + std::make_pair(Button::A, Action{"Mount Fs"_i18n, [this](){ + if (!m_entries.empty()) { + const auto rc = MountSaveFs(); + App::PushErrorBox(rc, "Failed to mount save filesystem"_i18n); + } + }}), std::make_pair(Button::B, Action{"Back"_i18n, [this](){ SetPop(); }}), @@ -666,11 
+686,6 @@ void Menu::DisplayOptions() { RestoreSave(); }, true); } - - options->Add("Mount Fs"_i18n, [this](){ - const auto rc = MountSaveFs(); - App::PushErrorBox(rc, "Failed to mount save filesystem"_i18n); - }); } options->Add("Advanced"_i18n, [this](){ @@ -1103,27 +1118,45 @@ Result Menu::BackupSaveInternal(ProgressBox* pbox, const dump::DumpLocation& loc Result Menu::MountSaveFs() { const auto& e = m_entries[m_index]; - const auto save_data_space_id = (FsSaveDataSpaceId)e.save_data_space_id; + fs::FsPath root; - FsSaveDataAttribute attr{}; - attr.application_id = e.application_id; - attr.uid = e.uid; - attr.system_save_data_id = e.system_save_data_id; - attr.save_data_type = e.save_data_type; - attr.save_data_rank = e.save_data_rank; - attr.save_data_index = e.save_data_index; + if (e.system_save_data_id) { + R_TRY(devoptab::MountFromSavePath(e.system_save_data_id, root)); - auto fs = std::make_shared((FsSaveDataType)e.save_data_type, save_data_space_id, &attr, true); - R_TRY(fs->GetFsOpenResult()); + auto fs = std::make_shared(e.system_save_data_id, root); - const filebrowser::FsEntry fs_entry{ - .name = e.GetName(), - .root = "/", - .type = filebrowser::FsType::Custom, - .flags = filebrowser::FsEntryFlag_ReadOnly, - }; + const filebrowser::FsEntry fs_entry{ + .name = e.GetName(), + .root = root, + .type = filebrowser::FsType::Custom, + .flags = filebrowser::FsEntryFlag_ReadOnly, + }; + + App::Push(fs, fs_entry, root); + } else { + const auto save_data_space_id = (FsSaveDataSpaceId)e.save_data_space_id; + + FsSaveDataAttribute attr{}; + attr.application_id = e.application_id; + attr.uid = e.uid; + attr.system_save_data_id = e.system_save_data_id; + attr.save_data_type = e.save_data_type; + attr.save_data_rank = e.save_data_rank; + attr.save_data_index = e.save_data_index; + + auto fs = std::make_shared((FsSaveDataType)e.save_data_type, save_data_space_id, &attr, true); + R_TRY(fs->GetFsOpenResult()); + + const filebrowser::FsEntry fs_entry{ + .name = 
e.GetName(), + .root = "/", + .type = filebrowser::FsType::Custom, + .flags = filebrowser::FsEntryFlag_ReadOnly, + }; + + App::Push(fs, fs_entry, "/"); + } - App::Push(fs, fs_entry, "/"); R_SUCCEED(); } diff --git a/sphaira/source/utils/devoptab_save.cpp b/sphaira/source/utils/devoptab_save.cpp new file mode 100644 index 0000000..d4dd03c --- /dev/null +++ b/sphaira/source/utils/devoptab_save.cpp @@ -0,0 +1,375 @@ + +#include "utils/devoptab.hpp" +#include "defines.hpp" +#include "log.hpp" + +#include "yati/nx/nxdumptool/defines.h" +#include "yati/nx/nxdumptool/core/save.h" + +#include +#include +#include +#include +#include + +namespace sphaira::devoptab { +namespace { + +struct Device { + save_ctx_t* ctx; + hierarchical_save_file_table_ctx_t* file_table; +}; + +struct File { + Device* device; + save_fs_list_entry_t entry; + allocation_table_storage_ctx_t storage; + size_t off; +}; + +struct DirNext { + u32 next_directory; + u32 next_file; +}; + +struct Dir { + Device* device; + save_fs_list_entry_t entry; + u32 next_directory; + u32 next_file; +}; + +bool fix_path(const char* str, char* out) { + // log_write("[SAVE] got path: %s\n", str); + + str = std::strrchr(str, ':'); + if (!str) { + return false; + } + + // skip over ':' + str++; + size_t len = 0; + + for (size_t i = 0; str[i]; i++) { + // skip multiple slashes. + if (i && str[i] == '/' && str[i - 1] == '/') { + continue; + } + + // add leading slash. + if (!i && str[i] != '/') { + out[len++] = '/'; + } + + // save single char. + out[len++] = str[i]; + } + + // root path uses "" + if (len == 1 && out[0] == '/') { + // out[0] = '\0'; + } + + // null the end. 
+ out[len] = '\0'; + + // log_write("[SAVE] end path: %s\n", out); + + return true; +} + +static int set_errno(struct _reent *r, int err) { + r->_errno = err; + return -1; +} + +int devoptab_open(struct _reent *r, void *fileStruct, const char *_path, int flags, int mode) { + auto device = (Device*)r->deviceData; + auto file = static_cast(fileStruct); + std::memset(file, 0, sizeof(*file)); + + char path[FS_MAX_PATH]; + if (!fix_path(_path, path)) { + return set_errno(r, ENOENT); + } + + if (!save_hierarchical_file_table_get_file_entry_by_path(device->file_table, path, &file->entry)) { + return set_errno(r, ENOENT); + } + + if (!save_open_fat_storage(&device->ctx->save_filesystem_core, &file->storage, file->entry.value.save_file_info.start_block)) { + return set_errno(r, ENOENT); + } + + file->device = device; + return r->_errno = 0; +} + +int devoptab_close(struct _reent *r, void *fd) { + auto file = static_cast(fd); + std::memset(file, 0, sizeof(*file)); + + return r->_errno = 0; +} + +ssize_t devoptab_read(struct _reent *r, void *fd, char *ptr, size_t len) { + auto file = static_cast(fd); + + // todo: maybe eof here? 
+ const auto bytes_read = save_allocation_table_storage_read(&file->storage, ptr, file->off, len); + if (!bytes_read) { + return set_errno(r, ENOENT); + } + + file->off += bytes_read; + return bytes_read; +} + +off_t devoptab_seek(struct _reent *r, void *fd, off_t pos, int dir) { + auto file = static_cast(fd); + + if (dir == SEEK_CUR) { + pos += file->off; + } else if (dir == SEEK_END) { + pos = file->storage._length; + } + + r->_errno = 0; + return file->off = std::clamp(pos, 0, file->storage._length); +} + +int devoptab_fstat(struct _reent *r, void *fd, struct stat *st) { + auto file = static_cast(fd); + + log_write("[\t\tDEV] fstat\n"); + std::memset(st, 0, sizeof(*st)); + st->st_nlink = 1; + st->st_size = file->storage._length; + st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH; + return r->_errno = 0; +} + +DIR_ITER* devoptab_diropen(struct _reent *r, DIR_ITER *dirState, const char *_path) { + auto device = (Device*)r->deviceData; + auto dir = static_cast(dirState->dirStruct); + std::memset(dir, 0, sizeof(*dir)); + + char path[FS_MAX_PATH]; + if (!fix_path(_path, path)) { + set_errno(r, ENOENT); + return NULL; + } + + if (!std::strcmp(path, "/")) { + save_entry_key_t key{}; + const auto idx = save_fs_list_get_index_from_key(&device->file_table->directory_table, &key, NULL); + if (idx == 0xFFFFFFFF) { + set_errno(r, ENOENT); + return NULL; + } + + if (!save_fs_list_get_value(&device->file_table->directory_table, idx, &dir->entry)) { + set_errno(r, ENOENT); + return NULL; + } + } else if (!save_hierarchical_directory_table_get_file_entry_by_path(device->file_table, path, &dir->entry)) { + set_errno(r, ENOENT); + return NULL; + } + + dir->device = device; + dir->next_file = dir->entry.value.save_find_position.next_file; + dir->next_directory = dir->entry.value.save_find_position.next_directory; + + r->_errno = 0; + return dirState; +} + +int devoptab_dirreset(struct _reent *r, DIR_ITER *dirState) { + auto dir = static_cast(dirState->dirStruct); + + 
dir->next_file = dir->entry.value.save_find_position.next_file; + dir->next_directory = dir->entry.value.save_find_position.next_directory; + + return r->_errno = 0; +} + +int devoptab_dirnext(struct _reent *r, DIR_ITER *dirState, char *filename, struct stat *filestat) { + auto dir = static_cast(dirState->dirStruct); + + std::memset(filestat, 0, sizeof(*filestat)); + save_fs_list_entry_t entry{}; + + if (dir->next_directory) { + // todo: use save_allocation_table_storage_read for faster reads + if (!save_fs_list_get_value(&dir->device->file_table->directory_table, dir->next_directory, &entry)) { + return set_errno(r, ENOENT); + } + + filestat->st_mode = S_IFDIR | S_IRUSR | S_IRGRP | S_IROTH; + dir->next_directory = entry.value.next_sibling; + } + else if (dir->next_file) { + // todo: use save_allocation_table_storage_read for faster reads + if (!save_fs_list_get_value(&dir->device->file_table->file_table, dir->next_file, &entry)) { + return set_errno(r, ENOENT); + } + + filestat->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH; + // todo: confirm this. + filestat->st_size = entry.value.save_file_info.length; + // filestat->st_size = file->storage.block_size; + dir->next_file = entry.value.next_sibling; + } + else { + return set_errno(r, ENOENT); + } + + filestat->st_nlink = 1; + strcpy(filename, entry.name); + + return r->_errno = 0; +} + +int devoptab_dirclose(struct _reent *r, DIR_ITER *dirState) { + auto dir = static_cast(dirState->dirStruct); + std::memset(dir, 0, sizeof(*dir)); + + return r->_errno = 0; +} + +int devoptab_lstat(struct _reent *r, const char *_path, struct stat *st) { + auto device = (Device*)r->deviceData; + + log_write("[\t\tDEV] lstat\n"); + + char path[FS_MAX_PATH]; + if (!fix_path(_path, path)) { + return set_errno(r, ENOENT); + } + + std::memset(st, 0, sizeof(*st)); + save_fs_list_entry_t entry{}; + + // NOTE: this is very slow. 
+ if (save_hierarchical_file_table_get_file_entry_by_path(device->file_table, path, &entry)) { + st->st_mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH; + st->st_size = entry.value.save_file_info.length; + } else if (save_hierarchical_directory_table_get_file_entry_by_path(device->file_table, path, &entry)) { + st->st_mode = S_IFDIR | S_IRUSR | S_IRGRP | S_IROTH; + } else { + return set_errno(r, ENOENT); + } + + st->st_nlink = 1; + + return r->_errno = 0; +} + +constexpr devoptab_t DEVOPTAB = { + .structSize = sizeof(File), + .open_r = devoptab_open, + .close_r = devoptab_close, + .read_r = devoptab_read, + .seek_r = devoptab_seek, + .fstat_r = devoptab_fstat, + .stat_r = devoptab_lstat, + .dirStateSize = sizeof(Dir), + .diropen_r = devoptab_diropen, + .dirreset_r = devoptab_dirreset, + .dirnext_r = devoptab_dirnext, + .dirclose_r = devoptab_dirclose, + .lstat_r = devoptab_lstat, +}; + +struct Entry { + u64 id; + Device device; + devoptab_t devoptab; + char name[32]; + s32 ref_count; +}; + +Mutex g_mutex; +std::vector g_entries; + +void MakeMountPath(u64 id, fs::FsPath& out_path) { + std::snprintf(out_path, sizeof(out_path), "%016lx:/", id); +} + +} // namespace + +Result MountFromSavePath(u64 id, fs::FsPath& out_path) { + SCOPED_MUTEX(&g_mutex); + + // check if we already have the save mounted. + for (auto& e : g_entries) { + if (e.id == id) { + e.ref_count++; + MakeMountPath(id, out_path); + R_SUCCEED(); + } + } + + char path[256]; + std::snprintf(path, sizeof(path), "SYSTEM:/save/%016lx", id); + + auto ctx = save_open_savefile(path, 0); + if (!ctx) { + R_THROW(0x1); + } + + log_write("[SAVE] OPEN SUCCESS %s\n", path); + + // create new entry. 
+ auto& entry = g_entries.emplace_back(); + std::snprintf(entry.name, sizeof(entry.name), "%016lx", id); + + entry.id = id; + entry.device.ctx = ctx; + entry.device.file_table = &ctx->save_filesystem_core.file_table; + entry.devoptab = DEVOPTAB; + entry.devoptab.name = entry.name; + entry.devoptab.deviceData = &entry.device; + + R_UNLESS(AddDevice(&entry.devoptab) >= 0, 0x1); + log_write("[SAVE] DEVICE SUCCESS %s %s\n", path, entry.name); + + MakeMountPath(id, out_path); + + entry.ref_count++; + R_SUCCEED(); +} + +void UnmountSave(u64 id) { + SCOPED_MUTEX(&g_mutex); + + auto itr = std::ranges::find_if(g_entries, [id](auto& e){ + return id == e.id; + }); + + if (itr == g_entries.end()) { + return; + } + + if (itr->ref_count) { + itr->ref_count--; + } + + if (!itr->ref_count) { + fs::FsPath path; + MakeMountPath(id, path); + + // todo: verify this actually works. + RemoveDevice(path); + + if (itr->device.ctx) { + save_close_savefile(&itr->device.ctx); + } + + g_entries.erase(itr); + } +} + +} // namespace sphaira::devoptab diff --git a/sphaira/source/yati/nx/es.cpp b/sphaira/source/yati/nx/es.cpp index 8585230..d3302ee 100644 --- a/sphaira/source/yati/nx/es.cpp +++ b/sphaira/source/yati/nx/es.cpp @@ -6,6 +6,9 @@ #include "ui/types.hpp" #include "log.hpp" +#include "yati/nx/nxdumptool/defines.h" +#include "yati/nx/nxdumptool/core/save.h" + #include #include #include @@ -13,6 +16,101 @@ namespace sphaira::es { namespace { +class CachedSave { +public: + constexpr CachedSave(const char* _path) : path{_path} {} + + void Close() { + if (ctx) { + save_close_savefile(&ctx); + ctx = nullptr; + } + } + +protected: + auto Open() { + if (ctx) { + return ctx; + } + return ctx = save_open_savefile(path, 0); + } + +private: + const char* path; + save_ctx_t* ctx{}; +}; + +class CachedCommonSave : public CachedSave { +public: + using CachedSave::CachedSave; + + bool GetTicketBin(allocation_table_storage_ctx_t& storage, u64& size) { + return GetTicketBin(Open(), has_ticket_bin, 
ticket_bin_storage, ticket_bin_size, storage, size); + } + + bool GetTicketListBin(allocation_table_storage_ctx_t& storage, u64& size) { + return GetTicketBin(Open(), has_ticket_list_bin, ticket_list_bin_storage, ticket_list_bin_size, storage, size); + } + +private: + static bool GetTicketBin(save_ctx_t* ctx, bool& m_has, allocation_table_storage_ctx_t& m_storage, u64& m_size, allocation_table_storage_ctx_t& out_storage, u64& out_size) { + if (!ctx) { + return false; + } + + if (!m_has) { + if (!save_get_fat_storage_from_file_entry_by_path(ctx, "/ticket.bin", &m_storage, &m_size)) { + return false; + } + } + + out_storage = m_storage; + out_size = m_size; + return m_has = true; + } + +private: + u64 ticket_bin_size{}; + allocation_table_storage_ctx_t ticket_bin_storage{}; + bool has_ticket_bin{}; + + u64 ticket_list_bin_size{}; + allocation_table_storage_ctx_t ticket_list_bin_storage{}; + bool has_ticket_list_bin{}; +}; + +class CachedCertSave { +public: + constexpr CachedCertSave(const char* _path) : path{_path} {} + + auto Get() { + if (ctx) { + return ctx; + } + return ctx = save_open_savefile(path, 0); + } + + void Close() { + if (ctx) { + save_close_savefile(&ctx); + ctx = nullptr; + } + } + +private: + const char* path; + save_ctx_t* ctx{}; + u64 ticket_bin_size{}; + allocation_table_storage_ctx_t ticket_bin_storage{}; +}; + +// kept alive whilst es is init, closed after, +// so only the first time opening is slow (40ms). +// todo: set global dirty flag when a ticket has been installed. +// todo: check if its needed to cache now that ive added lru cache to fatfs +CachedCommonSave g_common_save{"SYSTEM:/save/80000000000000e1"}; +CachedCommonSave g_personalised_save{"SYSTEM:/save/80000000000000e2"}; + Service g_esSrv; NX_GENERATE_SERVICE_GUARD(es); @@ -22,6 +120,9 @@ Result _esInitialize() { } void _esCleanup() { + // todo: add cert here when added. 
+ g_common_save.Close(); + g_personalised_save.Close(); serviceClose(&g_esSrv); } @@ -395,34 +496,45 @@ Result GetCommonTicketAndCertificate(const FsRightsId& rights_id, std::vector& tik_out, std::vector& cert_out) { - R_THROW(0x1); - + // todo: finish this off and fetch the certificate chain. + // todo: find out what ticket_list.bin is (offsets?) #if 0 - fs::FsStdio fs; - TimeStamp ts; - std::vector tik_buf; - // R_TRY(fs.read_entire_file("system:/save/80000000000000e1", tik_buf)); - R_TRY(fs.read_entire_file("SYSTEM:/save/80000000000000e2", tik_buf)); - log_write("[ES] size: %zu\n", tik_buf.size()); + + u64 ticket_bin_size; + allocation_table_storage_ctx_t ticket_bin_storage; + if (!g_common_save.GetTicketBin(ticket_bin_storage, ticket_bin_size)) { + log_write("\t\tFAILED TO GET SAVE\n"); + R_THROW(0x1); + } log_write("\t\t[ticket read] time taken: %.2fs %zums\n", ts.GetSecondsD(), ts.GetMs()); ts.Update(); - for (u32 i = 0; i < tik_buf.size() - 0x400; i += 0x400) { - const auto tikRsa2048 = (const TicketRsa2048*)(tik_buf.data() + i); - if (tikRsa2048->signature_block.sig_type != SigType_Rsa2048Sha256) { - continue; + std::vector tik_buf(std::min(ticket_bin_size, 1024 * 256)); + for (u64 off = 0; off < ticket_bin_size; off += tik_buf.size()) { + const auto size = save_allocation_table_storage_read(&ticket_bin_storage, tik_buf.data(), off, tik_buf.size()); + if (!size) { + log_write("\t\tfailed to read ticket bin\n"); + R_THROW(0x1); } - if (!std::memcmp(&rights_id, &tikRsa2048->data.rights_id, sizeof(rights_id))) { - log_write("\t[ES] tikRsa2048, found at: %u\n", i); + for (u32 i = 0; i < size - 0x400; i += 0x400) { + const auto tikRsa2048 = (const TicketRsa2048*)(tik_buf.data() + i); + if (tikRsa2048->signature_block.sig_type != SigType_Rsa2048Sha256) { + continue; + } + + if (!std::memcmp(&rights_id, &tikRsa2048->data.rights_id, sizeof(rights_id))) { + log_write("\t[ES] tikRsa2048, found at: %zu\n", off + i); + // log_write("[ES] finished es search\n"); + 
log_write("\t\t[ticket search] time taken: %.2fs %zums\n", ts.GetSecondsD(), ts.GetMs()); + R_SUCCEED(); + } } } - - log_write("[ES] finished es search\n"); - log_write("\t\t[ticket search] time taken: %.2fs %zums\n", ts.GetSecondsD(), ts.GetMs()); - R_THROW(0x1); #endif + + R_THROW(0x1); } } // namespace sphaira::es diff --git a/sphaira/source/yati/nx/nxdumptool/save.c b/sphaira/source/yati/nx/nxdumptool/save.c new file mode 100644 index 0000000..b926cda --- /dev/null +++ b/sphaira/source/yati/nx/nxdumptool/save.c @@ -0,0 +1,1839 @@ +/* + * save.c + * + * Copyright (c) 2019-2020, shchmue. + * Copyright (c) 2020-2024, DarkMatterCore . + * + * This file is part of nxdumptool (https://github.com/DarkMatterCore/nxdumptool). + * + * nxdumptool is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * nxdumptool is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +static inline void save_bitmap_set_bit(void *buffer, size_t bit_offset) +{ + *((u8*)buffer + (bit_offset >> 3)) |= 1 << (bit_offset & 7); +} + +static inline void save_bitmap_clear_bit(void *buffer, size_t bit_offset) +{ + *((u8*)buffer + (bit_offset >> 3)) &= ~(u8)(1 << (bit_offset & 7)); +} + +static inline u8 save_bitmap_check_bit(const void *buffer, size_t bit_offset) +{ + return (*((const u8*)buffer + (bit_offset >> 3)) & (1 << (bit_offset & 7))); +} + +static bool save_duplex_storage_init(duplex_storage_ctx_t *ctx, duplex_fs_layer_info_t *layer, void *bitmap, u64 bitmap_size) +{ + if (!ctx || !layer || !layer->data_a || !layer->data_b || !layer->info.block_size_power || !bitmap || !bitmap_size) + { + LOG_MSG_ERROR("Invalid parameters!"); + return false; + } + + ctx->data_a = layer->data_a; + ctx->data_b = layer->data_b; + ctx->bitmap_storage = (u8*)bitmap; + ctx->block_size = (1 << layer->info.block_size_power); + ctx->bitmap.data = ctx->bitmap_storage; + + ctx->bitmap.bitmap = calloc(1, bitmap_size >> 3); + if (!ctx->bitmap.bitmap) + { + LOG_MSG_ERROR("Failed to allocate memory for duplex bitmap!"); + return false; + } + + u32 bits_remaining = bitmap_size; + u32 bitmap_pos = 0; + u32 *buffer_pos = (u32*)bitmap; + + while(bits_remaining) + { + u32 bits_to_read = (bits_remaining < 32 ? 
bits_remaining : 32);
        u32 val = *buffer_pos;

        for(u32 i = 0; i < bits_to_read; i++)
        {
            /* Bits are consumed MSB-first: bit 31 of the current word maps to the next bitmap position. */
            if (val & 0x80000000)
            {
                save_bitmap_set_bit(ctx->bitmap.bitmap, bitmap_pos);
            } else {
                save_bitmap_clear_bit(ctx->bitmap.bitmap, bitmap_pos);
            }

            bitmap_pos++;
            bits_remaining--;
            val <<= 1;
        }

        buffer_pos++;
    }

    return true;
}

/* Reads 'count' bytes at 'offset' from a duplex storage.
 * For every block touched, the duplex bitmap selects the source buffer: bit set -> data_b, bit clear -> data_a.
 * Note that 'in_pos' (the absolute offset) is used to index into the selected buffer, not the block-local position.
 * Returns the number of bytes copied (0 on bad parameters). */
static u32 save_duplex_storage_read(duplex_storage_ctx_t *ctx, void *buffer, u64 offset, size_t count)
{
    if (!ctx || !ctx->block_size || !ctx->bitmap.bitmap || !buffer || !count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    u64 in_pos = offset;
    u32 out_pos = 0;
    u32 remaining = count;

    while(remaining)
    {
        u32 block_num = (u32)(in_pos / ctx->block_size);
        u32 block_pos = (u32)(in_pos % ctx->block_size);
        /* Clamp the chunk to the end of the current block so each iteration stays within one bitmap entry. */
        u32 bytes_to_read = ((ctx->block_size - block_pos) < remaining ? (ctx->block_size - block_pos) : remaining);

        u8 *data = (save_bitmap_check_bit(ctx->bitmap.bitmap, block_num) ? ctx->data_b : ctx->data_a);
        memcpy((u8*)buffer + out_pos, data + in_pos, bytes_to_read);

        out_pos += bytes_to_read;
        in_pos += bytes_to_read;
        remaining -= bytes_to_read;
    }

    return out_pos;
}

/* Groups consecutive remap map entries into segments. Entries whose virtual ranges are contiguous
 * (previous virtual_offset_end == next virtual_offset) are chained into the same segment via the
 * 'next' pointers, and each entry's 'segment' back-pointer is set.
 * On failure, all per-segment entry arrays allocated so far are released and NULL is returned. */
static remap_segment_ctx_t *save_remap_init_segments(remap_header_t *header, remap_entry_ctx_t *map_entries, u32 num_map_entries)
{
    if (!header || !header->map_segment_count || !map_entries || !num_map_entries)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return NULL;
    }

    remap_segment_ctx_t *segments = calloc(header->map_segment_count, sizeof(remap_segment_ctx_t));
    if (!segments)
    {
        LOG_MSG_ERROR("Failed to allocate initial memory for remap segments!");
        return NULL;
    }

    u32 i, entry_idx = 0;
    bool success = false;

    for(i = 0; i < header->map_segment_count; i++)
    {
        remap_segment_ctx_t *seg = &(segments[i]);

        seg->entry_count = 0;

        seg->entries = calloc(1, sizeof(remap_entry_ctx_t*));
        if (!seg->entries)
        {
            LOG_MSG_ERROR("Failed to allocate memory for remap segment entry #%u!", entry_idx);
            goto end;
        }

        seg->entries[seg->entry_count++] = &map_entries[entry_idx];
        seg->offset = map_entries[entry_idx].virtual_offset;
        map_entries[entry_idx++].segment = seg;

        /* Absorb every entry whose virtual range continues the previous one into this segment.
         * The entry pointer array is grown by one slot per absorbed entry. */
        while(entry_idx < num_map_entries && map_entries[entry_idx - 1].virtual_offset_end == map_entries[entry_idx].virtual_offset)
        {
            map_entries[entry_idx].segment = seg;
            map_entries[entry_idx - 1].next = &map_entries[entry_idx];

            remap_entry_ctx_t **ptr = calloc(seg->entry_count + 1, sizeof(remap_entry_ctx_t*));
            if (!ptr)
            {
                LOG_MSG_ERROR("Failed to allocate memory for remap segment entry #%u!", entry_idx);
                goto end;
            }

            memcpy(ptr, seg->entries, sizeof(remap_entry_ctx_t*) * seg->entry_count);
            free(seg->entries);
            seg->entries = ptr;
            seg->entries[seg->entry_count++] = &map_entries[entry_idx++];
        }

        seg->length = (seg->entries[seg->entry_count - 1]->virtual_offset_end - seg->entries[0]->virtual_offset);
    }

    success = true;

end:
    if (!success)
    {
        /* Walk the already-initialized segments through the map entries' back-pointers and free their
         * entry arrays, clearing 'segment'/'next' links so no dangling pointers remain. */
        entry_idx = 0;

        for(u32 j = 0; j <= i; j++)
        {
            if (!map_entries[entry_idx].segment) break;

            if (map_entries[entry_idx].segment->entries)
            {
                free(map_entries[entry_idx].segment->entries);
                map_entries[entry_idx].segment->entries = NULL;
            }

            map_entries[entry_idx++].segment = NULL;

            while(entry_idx < num_map_entries && map_entries[entry_idx - 1].virtual_offset_end == map_entries[entry_idx].virtual_offset)
            {
                map_entries[entry_idx - 1].next = NULL;

                if (!map_entries[entry_idx].segment) break;

                if (map_entries[entry_idx].segment->entries)
                {
                    free(map_entries[entry_idx].segment->entries);
                    map_entries[entry_idx].segment->entries = NULL;
                }

                map_entries[entry_idx++].segment = NULL;
            }
        }

        free(segments);
        segments = NULL;
    }

    return segments;
}

/* Looks up the remap entry covering virtual 'offset'. The segment index lives in the top
 * 'segment_bits' bits of the offset; within that segment, entries are scanned linearly for the
 * first one whose virtual_offset_end lies past 'offset'. Returns NULL when no entry matches. */
static remap_entry_ctx_t *save_remap_get_map_entry(remap_storage_ctx_t *ctx, u64 offset)
{
    if (!ctx || !ctx->header || !ctx->segments)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return NULL;
    }

    u32 segment_idx = (u32)(offset >> (64 - ctx->header->segment_bits));

    if (segment_idx < ctx->header->map_segment_count)
    {
        for(u32 i = 0; i < ctx->segments[segment_idx].entry_count; i++)
        {
            if (ctx->segments[segment_idx].entries[i]->virtual_offset_end > offset) return ctx->segments[segment_idx].entries[i];
        }
    }

    LOG_MSG_ERROR("Unable to find map entry for offset 0x%lX!", offset);
    return NULL;
}

/* Reads 'count' bytes at virtual 'offset' through the remap layer. Each chunk is translated to a
 * physical offset via the covering map entry and read either straight from the savefile
 * (STORAGE_BYTES) or from the duplex storage (STORAGE_DUPLEX). Crossing a map entry's end advances
 * to entry->next. Returns bytes read; a short count indicates a lower-layer failure. */
static u64 save_remap_read(remap_storage_ctx_t *ctx, void *buffer, u64 offset, size_t count)
{
    if (!ctx || (ctx->type == STORAGE_BYTES && !ctx->file) || (ctx->type == STORAGE_DUPLEX && !ctx->duplex) || (ctx->type != STORAGE_BYTES && ctx->type != STORAGE_DUPLEX) || !buffer || !count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    remap_entry_ctx_t *entry = save_remap_get_map_entry(ctx, offset);
    if (!entry)
    {
        LOG_MSG_ERROR("Failed to retrieve map entry!");
        return 0;
    }

    u64 in_pos = offset;
    u64 out_pos = 0;
    u64 remaining = count;

    int res = 0;

    while(remaining)
    {
        u64 entry_pos = (in_pos - entry->virtual_offset);
        u64 bytes_to_read = ((entry->virtual_offset_end - in_pos) < remaining ? (entry->virtual_offset_end - in_pos) : remaining);
        u64 read_bytes = 0;

        switch (ctx->type)
        {
            case STORAGE_BYTES:
                res = fseek(ctx->file, ctx->base_storage_offset + entry->physical_offset + entry_pos, SEEK_SET);
                if (res || ftell(ctx->file) != (ctx->base_storage_offset + entry->physical_offset + entry_pos))
                {
                    LOG_MSG_ERROR("Failed to seek to offset 0x%lX in savefile! (%d).", ctx->base_storage_offset + entry->physical_offset + entry_pos, errno);
                    return out_pos;
                }

                // todo:
                read_bytes = fread((u8*)buffer + out_pos, 1, bytes_to_read, ctx->file);
                if (read_bytes != bytes_to_read)
                {
                    LOG_MSG_ERROR("Failed to read 0x%lX-byte long chunk from offset 0x%lX in savefile! (read 0x%lX, errno %d).", bytes_to_read, ctx->base_storage_offset + entry->physical_offset + entry_pos, read_bytes, errno);
                    return (out_pos + read_bytes);
                }

                break;
            case STORAGE_DUPLEX:
                read_bytes = save_duplex_storage_read(ctx->duplex, (u8*)buffer + out_pos, ctx->base_storage_offset + entry->physical_offset + entry_pos, bytes_to_read);
                if (read_bytes != bytes_to_read)
                {
                    LOG_MSG_ERROR("Failed to read remap data from duplex storage!");
                    return (out_pos + read_bytes);
                }

                break;
            default:
                break;
        }

        out_pos += bytes_to_read;
        in_pos += bytes_to_read;
        remaining -= bytes_to_read;

        /* Moved past this entry's virtual range: continue with the next contiguous entry. */
        if (in_pos >= entry->virtual_offset_end) entry = entry->next;
    }

    return out_pos;
}

/* Reads journal-storage data: each journal block index is translated to its physical block via
 * ctx->map.entries[].physical_index, then fetched through the remap layer at journal_data_offset.
 * Returns bytes read; short count on lower-layer failure. */
static u64 save_journal_storage_read(journal_storage_ctx_t *ctx, remap_storage_ctx_t *remap, void *buffer, u64 offset, size_t count)
{
    if (!ctx || !ctx->block_size || !remap || !buffer || !count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    u64 in_pos = offset;
    u64 out_pos = 0;
    u64 remaining = count;

    while(remaining)
    {
        u64 block_num = (in_pos / ctx->block_size);
        u64 block_pos = (in_pos % ctx->block_size);
        u64 physical_offset = (ctx->map.entries[block_num].physical_index * ctx->block_size + block_pos);
        u64 bytes_to_read = ((ctx->block_size - block_pos) < remaining ? (ctx->block_size - block_pos) : remaining);

        u64 read_bytes = save_remap_read(remap, (u8*)buffer + out_pos, ctx->journal_data_offset + physical_offset, bytes_to_read);
        if (read_bytes != bytes_to_read)
        {
            LOG_MSG_ERROR("Failed to read journal storage data!");
            return (out_pos + read_bytes);
        }

        out_pos += bytes_to_read;
        in_pos += bytes_to_read;
        remaining -= bytes_to_read;
    }

    return out_pos;
}

/* Initializes the hierarchical integrity verification (IVFC) storage chain.
 * Level 0 is the master hash (raw bytes at master_hash_offset); levels 1..3 read through the remap
 * layer; with 5 levels, the last level is the journal-backed data level. Per-level salts are derived
 * by HMAC-SHA256 over fixed "HierarchicalIntegrityVerificationStorage::*" strings keyed with the
 * header's salt_source. Allocates level_validities and per-level block_validities arrays. */
static bool save_ivfc_storage_init(hierarchical_integrity_verification_storage_ctx_t *ctx, u64 master_hash_offset, ivfc_save_hdr_t *ivfc)
{
    if (!ctx || !ivfc || !ivfc->num_levels)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    bool success = false;

    ivfc_level_save_ctx_t *levels = ctx->levels;
    levels[0].type = STORAGE_BYTES;
    levels[0].hash_offset = master_hash_offset;

    for(u32 i = 1; i < 4; i++)
    {
        ivfc_level_hdr_t *level = &ivfc->level_headers[i - 1];
        levels[i].type = STORAGE_REMAP;
        levels[i].data_offset = level->logical_offset;
        levels[i].data_size = level->hash_data_size;
    }

    if (ivfc->num_levels == 5)
    {
        ivfc_level_hdr_t *data_level = &ivfc->level_headers[ivfc->num_levels - 2];
        levels[ivfc->num_levels - 1].type = STORAGE_JOURNAL;
        levels[ivfc->num_levels - 1].data_offset = data_level->logical_offset;
        levels[ivfc->num_levels - 1].data_size = data_level->hash_data_size;
    }

    struct salt_source_t {
        char string[50];
        u32 length;
    };

    static const struct salt_source_t salt_sources[6] = {
        { "HierarchicalIntegrityVerificationStorage::Master", 48 },
        { "HierarchicalIntegrityVerificationStorage::L1", 44 },
        { "HierarchicalIntegrityVerificationStorage::L2", 44 },
        { "HierarchicalIntegrityVerificationStorage::L3", 44 },
        { "HierarchicalIntegrityVerificationStorage::L4", 44 },
        { "HierarchicalIntegrityVerificationStorage::L5", 44 }
    };

    integrity_verification_info_ctx_t init_info[ivfc->num_levels];

    init_info[0].data = &levels[0];
    init_info[0].block_size = 0;

    for(u32 i = 1; i < ivfc->num_levels; i++)
    {
        init_info[i].data = &levels[i];
        init_info[i].block_size = (1 << ivfc->level_headers[i - 1].block_size);
        hmacSha256CalculateMac(init_info[i].salt, salt_sources[i - 1].string, salt_sources[i - 1].length, ivfc->salt_source, 0x20);
    }

    ctx->integrity_storages[0].next_level = NULL;

    ctx->level_validities = calloc((ivfc->num_levels - 1), sizeof(validity_t*));
    if (!ctx->level_validities)
    {
        LOG_MSG_ERROR("Failed to allocate memory for level validities!");
        goto end;
    }

    /* Wire each verification storage: level i's hashes live in level i-1, its data in level i.
     * next_level points back down the chain toward the master hash. */
    for(u32 i = 1; i < ivfc->num_levels; i++)
    {
        integrity_verification_storage_ctx_t *level_data = &ctx->integrity_storages[i - 1];
        level_data->hash_storage = &levels[i - 1];
        level_data->base_storage = &levels[i];
        level_data->sector_size = init_info[i].block_size;
        level_data->_length = init_info[i].data->data_size;
        level_data->sector_count = ((level_data->_length + level_data->sector_size - 1) / level_data->sector_size);
        memcpy(level_data->salt, init_info[i].salt, 0x20);

        level_data->block_validities = calloc(level_data->sector_count, sizeof(validity_t));
        if (!level_data->block_validities)
        {
            LOG_MSG_ERROR("Failed to allocate memory for block validities in IVFC level #%u!", i);
            goto end;
        }

        ctx->level_validities[i - 1] = level_data->block_validities;
        if (i > 1) level_data->next_level = &ctx->integrity_storages[i - 2];
    }

    ctx->data_level = &levels[ivfc->num_levels - 1];
    ctx->_length = ctx->integrity_storages[ivfc->num_levels - 2]._length;

    success = true;

end:
    /* NOTE(review): ctx->level_validities is freed before the loop below writes
     * ctx->level_validities[i - 1] = NULL — looks like a use-after-free on this error path; verify
     * the intended cleanup order. */
    if (!success && ctx->level_validities)
    {
        free(ctx->level_validities);
        ctx->level_validities = NULL;

        for(u32 i = 1; i < ivfc->num_levels; i++)
        {
            integrity_verification_storage_ctx_t *level_data = &ctx->integrity_storages[i - 1];

            if (level_data->block_validities)
            {
                free(level_data->block_validities);
                level_data->block_validities = NULL;
                ctx->level_validities[i - 1] = NULL;
            } else {
                break;
            }
        }
    }

    return success;
}

/* Reads 'count' bytes at 'offset' from a single IVFC level, dispatching on its backing storage:
 * raw savefile bytes (hash levels), the meta remap storage, or the journal storage (data level).
 * Returns 'count' on success, the short byte count on failure. */
static size_t save_ivfc_level_fread(ivfc_level_save_ctx_t *ctx, void *buffer, u64 offset, size_t count)
{
    if (!ctx || (ctx->type == STORAGE_BYTES && !ctx->save_ctx->file) || (ctx->type != STORAGE_BYTES && ctx->type != STORAGE_REMAP && ctx->type != STORAGE_JOURNAL) || !buffer || !count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    size_t read_bytes = 0;

    int res = 0;

    switch (ctx->type)
    {
        case STORAGE_BYTES:
            res = fseek(ctx->save_ctx->file, ctx->hash_offset + offset, SEEK_SET);
            if (res || ftell(ctx->save_ctx->file) != (ctx->hash_offset + offset))
            {
                LOG_MSG_ERROR("Failed to seek to offset 0x%lX in savefile! (%d).", ctx->hash_offset + offset, errno);
                return 0;
            }

            //
            read_bytes = fread(buffer, 1, count, ctx->save_ctx->file);
            if (read_bytes != count)
            {
                LOG_MSG_ERROR("Failed to read 0x%lX-byte long IVFC level data chunk from offset 0x%lX in savefile! (read 0x%lX, errno %d).", count, ctx->hash_offset + offset, read_bytes, errno);
                return read_bytes;
            }

            break;
        case STORAGE_REMAP:
            read_bytes = save_remap_read(&ctx->save_ctx->meta_remap_storage, buffer, ctx->data_offset + offset, count);
            if (read_bytes != count)
            {
                LOG_MSG_ERROR("Failed to read IVFC level data from remap storage!");
                return read_bytes;
            }

            break;
        case STORAGE_JOURNAL:
            read_bytes = save_journal_storage_read(&ctx->save_ctx->journal_storage, &ctx->save_ctx->data_remap_storage, buffer, ctx->data_offset + offset, count);
            if (read_bytes != count)
            {
                LOG_MSG_ERROR("Failed to read IVFC level data from journal storage!");
                return read_bytes;
            }

            break;
        default:
            return 0;
    }

    return count;
}

/* Reads one sector-bounded chunk from an integrity verification storage.
 * Fetches the expected SHA-256 from the level above (next_level, or the raw hash storage at the
 * top), treats an all-zero hash as an empty (zero-filled) sector, and — when 'verify' is set and
 * the block is still unchecked — hashes salt||data (with bit 7 of the last hash byte forced, as the
 * stored hashes have) and records VALIDITY_VALID/INVALID in block_validities. 'count' must not
 * exceed the sector size. */
static bool save_ivfc_storage_read(integrity_verification_storage_ctx_t *ctx, void *buffer, u64 offset, size_t count, u32 verify)
{
    if (!ctx || !ctx->sector_size || (!ctx->next_level && !ctx->hash_storage && !ctx->base_storage) || !buffer || !count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    if (count > ctx->sector_size)
    {
        LOG_MSG_ERROR("IVFC read exceeds sector size!");
        return false;
    }

    u64 block_index = (offset / ctx->sector_size);

    if (ctx->block_validities[block_index] == VALIDITY_INVALID && verify)
    {
        LOG_MSG_ERROR("Hash error from previous check found at offset 0x%lX, count 0x%lX!", offset, count);
        return false;
    }

    u8 hash_buffer[0x20] = {0};
    u8 zeroes[0x20] = {0};
    u64 hash_pos = (block_index * 0x20);

    if (ctx->next_level)
    {
        if (!save_ivfc_storage_read(ctx->next_level, hash_buffer, hash_pos, 0x20, verify))
        {
            LOG_MSG_ERROR("Failed to read hash from next IVFC level!");
            return false;
        }
    } else {
        if (save_ivfc_level_fread(ctx->hash_storage, hash_buffer, hash_pos, 0x20) != 0x20)
        {
            LOG_MSG_ERROR("Failed to read hash from hash storage!");
            return false;
        }
    }

    /* An all-zero stored hash marks an unwritten sector: return zeroes without touching the base storage. */
    if (!memcmp(hash_buffer, zeroes, 0x20))
    {
        memset(buffer, 0, count);
        ctx->block_validities[block_index] = VALIDITY_VALID;
        return true;
    }

    if (save_ivfc_level_fread(ctx->base_storage, buffer, offset, count) != count)
    {
        LOG_MSG_ERROR("Failed to read IVFC level from base storage!");
        return false;
    }

    if (!(verify && ctx->block_validities[block_index] == VALIDITY_UNCHECKED)) return true;

    u8 hash[0x20] = {0};

    u8 *data_buffer = calloc(1, ctx->sector_size + 0x20);
    if (!data_buffer)
    {
        LOG_MSG_ERROR("Failed to allocate memory for data buffer!");
        return false;
    }

    memcpy(data_buffer, ctx->salt, 0x20);
    memcpy(data_buffer + 0x20, buffer, ctx->sector_size);

    sha256CalculateHash(hash, data_buffer, ctx->sector_size + 0x20);
    hash[0x1F] |= 0x80;

    free(data_buffer);

    ctx->block_validities[block_index] = (!memcmp(hash_buffer, hash, 0x20) ? VALIDITY_VALID : VALIDITY_INVALID);

    if (ctx->block_validities[block_index] == VALIDITY_INVALID && verify)
    {
        LOG_MSG_ERROR("Hash error from current check found at offset 0x%lX, count 0x%lX!", offset, count);
        return false;
    }

    return true;
}

/* Reads the FAT entry that 'entry->next' points at and rewrites 'entry' with its next/prev block
 * numbers (0xFFFFFFFF at list start/end). Returns the run length of the entry: 1 for a single
 * block, or the range length when the entry's high bit flags a multi-block run. Returns 0 on error. */
static u32 save_allocation_table_read_entry_with_length(allocation_table_ctx_t *ctx, allocation_table_entry_t *entry)
{
    if (!ctx || !ctx->base_storage || !entry)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    u32 length = 1;
    u32 entry_index = allocation_table_block_to_entry_index(entry->next);

    allocation_table_entry_t *entries = (allocation_table_entry_t*)((u8*)(ctx->base_storage) + (entry_index * SAVE_FAT_ENTRY_SIZE));

    if ((entries[0].next & 0x80000000) == 0)
    {
        if ((entries[0].prev & 0x80000000) && entries[0].prev != 0x80000000)
        {
            LOG_MSG_ERROR("Invalid range entry in allocation table!");
            return 0;
        }
    } else {
        /* Range entry: the following table slot holds the end index of the run. */
        length = (entries[1].next - entry_index + 1);
    }

    if (allocation_table_is_list_end(&entries[0]))
    {
        entry->next = 0xFFFFFFFF;
    } else {
        entry->next = allocation_table_entry_index_to_block(allocation_table_get_next(&entries[0]));
    }

    if (allocation_table_is_list_start(&entries[0]))
    {
        entry->prev = 0xFFFFFFFF;
    } else {
        entry->prev = allocation_table_entry_index_to_block(allocation_table_get_prev(&entries[0]));
    }

    return length;
}

/* Walks a FAT chain starting at 'block_index' and sums the lengths of all its runs.
 * Aborts with 0 if more nodes are visited than the table holds (cycle) or an entry is invalid. */
static u32 save_allocation_table_get_list_length(allocation_table_ctx_t *ctx, u32 block_index)
{
    if (!ctx || !ctx->header->allocation_table_block_count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    allocation_table_entry_t entry = {0};
    entry.next = block_index;
    u32 total_length = 0;
    u32 table_size = ctx->header->allocation_table_block_count;
    u32 nodes_iterated = 0;

    while(entry.next != 0xFFFFFFFF)
    {
        u32 entry_length = save_allocation_table_read_entry_with_length(ctx, &entry);
        if (!entry_length)
        {
            LOG_MSG_ERROR("Failed to retrieve FAT entry length!");
            return 0;
        }

        total_length += entry_length;
        nodes_iterated++;

        if (nodes_iterated > table_size)
        {
            LOG_MSG_ERROR("Cycle detected in allocation table!");
            return 0;
        }
    }

    return total_length;
}

/* Positions an iterator at the head of the FAT chain starting at 'initial_block'.
 * The head must have no predecessor (prev == 0xFFFFFFFF), otherwise the start block is invalid. */
static bool save_allocation_table_iterator_begin(allocation_table_iterator_ctx_t *ctx, allocation_table_ctx_t *table, u32 initial_block)
{
    if (!ctx || !table)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    ctx->fat = table;
    ctx->physical_block = initial_block;
    ctx->virtual_block = 0;

    allocation_table_entry_t entry = {0};
    entry.next = initial_block;

    ctx->current_segment_size = save_allocation_table_read_entry_with_length(ctx->fat, &entry);
    if (!ctx->current_segment_size)
    {
        LOG_MSG_ERROR("Failed to retrieve FAT entry length!");
        return false;
    }

    ctx->next_block = entry.next;
    ctx->prev_block = entry.prev;

    if (ctx->prev_block != 0xFFFFFFFF)
    {
        LOG_MSG_ERROR("Attempted to start FAT iteration from invalid block 0x%X!", initial_block);
        return false;
    }

    return true;
}

/* Advances the iterator to the next FAT segment, accumulating the virtual block position. */
static bool save_allocation_table_iterator_move_next(allocation_table_iterator_ctx_t *ctx)
{
    if (!ctx || ctx->next_block == 0xFFFFFFFF)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    ctx->virtual_block += ctx->current_segment_size;
    ctx->physical_block = ctx->next_block;

    allocation_table_entry_t entry = {0};
    entry.next = ctx->next_block;

    ctx->current_segment_size = save_allocation_table_read_entry_with_length(ctx->fat, &entry);
    if (!ctx->current_segment_size)
    {
        LOG_MSG_ERROR("Failed to retrieve current segment size!");
        return false;
    }

    ctx->next_block = entry.next;
    ctx->prev_block = entry.prev;

    return true;
}

/* Steps the iterator back to the previous FAT segment, rewinding the virtual block position. */
static bool save_allocation_table_iterator_move_prev(allocation_table_iterator_ctx_t *ctx)
{
    if (!ctx || ctx->prev_block == 0xFFFFFFFF)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    ctx->physical_block = ctx->prev_block;

    allocation_table_entry_t entry = {0};
    entry.next = ctx->prev_block;

    ctx->current_segment_size = save_allocation_table_read_entry_with_length(ctx->fat, &entry);
    if (!ctx->current_segment_size)
    {
        LOG_MSG_ERROR("Failed to retrieve current segment size!");
        return false;
    }

    ctx->next_block = entry.next;
    ctx->prev_block = entry.prev;

    ctx->virtual_block -= ctx->current_segment_size;

    return true;
}

/* Seeks the iterator until the segment containing virtual 'block' is current, moving forward or
 * backward as needed. Returns false if either move fails (block out of chain). */
static bool save_allocation_table_iterator_seek(allocation_table_iterator_ctx_t *ctx, u32 block)
{
    while(true)
    {
        if (block < ctx->virtual_block)
        {
            if (!save_allocation_table_iterator_move_prev(ctx)) return false;
        } else
        if (block >= ctx->virtual_block + ctx->current_segment_size)
        {
            if (!save_allocation_table_iterator_move_next(ctx)) return false;
        } else {
            return true;
        }
    }
}

/* Reads 'count' bytes at 'offset' from a FAT-backed storage. Virtual blocks are resolved to
 * physical blocks via the allocation table iterator; each physical chunk is then read sector by
 * sector through IVFC integrity storage #3, verifying hashes when ACTION_VERIFY is set.
 * Returns bytes read (short count on failure). */
u32 save_allocation_table_storage_read(allocation_table_storage_ctx_t *ctx, void *buffer, u64 offset, size_t count)
{
    if (!ctx || !ctx->fat || !ctx->block_size || !buffer || !count)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    allocation_table_iterator_ctx_t iterator;
    if (!save_allocation_table_iterator_begin(&iterator, ctx->fat, ctx->initial_block))
    {
        LOG_MSG_ERROR("Failed to initialize FAT interator!");
        return 0;
    }

    u64 in_pos = offset;
    u32 out_pos = 0;
    u32 remaining = count;

    while(remaining)
    {
        u32 block_num = (u32)(in_pos / ctx->block_size);
        if (!save_allocation_table_iterator_seek(&iterator, block_num))
        {
            LOG_MSG_ERROR("Failed to seek to block #%u within offset 0x%lX!", block_num, offset);
            return out_pos;
        }

        u32 segment_pos = (u32)(in_pos - ((u64)iterator.virtual_block * ctx->block_size));
        u64 physical_offset = ((iterator.physical_block * ctx->block_size) + segment_pos);

        u32 remaining_in_segment = ((iterator.current_segment_size * ctx->block_size) - segment_pos);
        u32 bytes_to_read = (remaining < remaining_in_segment ? remaining : remaining_in_segment);

        u32 sector_size = ctx->base_storage->integrity_storages[3].sector_size;
        u32 chunk_remaining = bytes_to_read;

        for(u32 i = 0; i < bytes_to_read; i += sector_size)
        {
            u32 bytes_to_request = (chunk_remaining < sector_size ? chunk_remaining : sector_size);

            if (!save_ivfc_storage_read(&ctx->base_storage->integrity_storages[3], (u8*)buffer + out_pos + i, physical_offset + i, bytes_to_request,
                                        ctx->base_storage->data_level->save_ctx->tool_ctx.action & ACTION_VERIFY))
            {
                LOG_MSG_ERROR("Failed to read %u bytes chunk from IVFC storage at physical offset 0x%lX!", bytes_to_request, physical_offset + i);
                return (out_pos + bytes_to_read - chunk_remaining);
            }

            chunk_remaining -= bytes_to_request;
        }

        out_pos += bytes_to_read;
        in_pos += bytes_to_read;
        remaining -= bytes_to_read;
    }

    return out_pos;
}

/* Returns the FS list capacity, lazily reading it from offset 4 of the list storage and caching it
 * in ctx->capacity. Returns 0 on failure. */
static u32 save_fs_list_get_capacity(save_filesystem_list_ctx_t *ctx)
{
    if (!ctx)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    if (!ctx->capacity)
    {
        if (save_allocation_table_storage_read(&ctx->storage, &ctx->capacity, 4, 4) != 4)
        {
            LOG_MSG_ERROR("Failed to read FS capacity from FAT storage!");
            return 0;
        }
    }

    return ctx->capacity;
}

/* Reads list entry 'index' from the FAT-backed list storage into 'entry'.
 * Returns SAVE_FS_LIST_ENTRY_SIZE on success, 0 on failure. */
static u32 save_fs_list_read_entry(save_filesystem_list_ctx_t *ctx, u32 index, save_fs_list_entry_t *entry)
{
    if (!ctx || !entry)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return 0;
    }

    u32 ret = save_allocation_table_storage_read(&ctx->storage, entry, index * SAVE_FS_LIST_ENTRY_SIZE, SAVE_FS_LIST_ENTRY_SIZE);
    if (ret != SAVE_FS_LIST_ENTRY_SIZE)
    {
        LOG_MSG_ERROR("Failed to read FS entry from FAT storage!");
        return 0;
    }

    return ret;
}

/* Bounds-checked fetch of list entry 'index' into 'value'. */
bool save_fs_list_get_value(save_filesystem_list_ctx_t *ctx, u32 index, save_fs_list_entry_t *value)
{
    if (!ctx || !value)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    u32 capacity = save_fs_list_get_capacity(ctx);
    if (!capacity)
    {
        LOG_MSG_ERROR("Failed to retrieve FS capacity!");
        return false;
    }

    if (index >= capacity)
    {
        LOG_MSG_ERROR("Provided index exceeds FS capacity!");
        return false;
    }

    if (!save_fs_list_read_entry(ctx, index, value))
    {
        LOG_MSG_ERROR("Failed to read FS entry!");
        return false;
    }

    return true;
}

/* Linear search of the used-entry list for an entry matching key->parent and key->name.
 * Returns the entry's index and stores the predecessor's index in *prev_index; on failure both are
 * 0xFFFFFFFF. 'prev_index' may be NULL (a local stands in for it). */
u32 save_fs_list_get_index_from_key(save_filesystem_list_ctx_t *ctx, save_entry_key_t *key, u32 *prev_index)
{
    u32 prev;
    if (!prev_index) prev_index = &prev;

    if (!ctx || !key)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        goto end;
    }

    u32 capacity = save_fs_list_get_capacity(ctx);
    if (!capacity)
    {
        LOG_MSG_ERROR("Failed to retrieve FS capacity!");
        goto end;
    }

    save_fs_list_entry_t entry;
    if (!save_fs_list_read_entry(ctx, ctx->used_list_head_index, &entry))
    {
        LOG_MSG_ERROR("Failed to read FS entry for initial index %u!", ctx->used_list_head_index);
        goto end;
    }

    *prev_index = ctx->used_list_head_index;
    u32 index = entry.next;

    while(index)
    {
        if (index > capacity)
        {
            LOG_MSG_ERROR("Save entry index %d out of range!", index);
            break;
        }

        if (!save_fs_list_read_entry(ctx, index, &entry))
        {
            LOG_MSG_ERROR("Failed to read FS entry for index %u!", index);
            break;
        }

        if (entry.parent == key->parent && !strcmp(entry.name, key->name)) return index;

        *prev_index = index;
        index = entry.next;
    }

    if (!index) LOG_MSG_ERROR("Unable to find FS index from key!");

end:
    *prev_index = 0xFFFFFFFF;
    return 0xFFFFFFFF;
}

/* Resolves 'path' component by component against the directory table, leaving the final
 * component's name and its parent directory index in 'key'.
 * NOTE(review): on the first iteration 'pos' still points AT the leading '/', so 'tmp' equals
 * 'pos' and an empty name is looked up (presumably matching the root entry) — confirm this matches
 * the directory table's root representation. */
bool save_hierarchical_file_table_find_path_recursive(hierarchical_save_file_table_ctx_t *ctx, save_entry_key_t *key, const char *path)
{
    if (!ctx || !key || !path || !*path)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    key->parent = 0;
    const char *pos = strchr(path, '/');

    while(pos)
    {
        memset(key->name, 0, SAVE_FS_LIST_MAX_NAME_LENGTH);

        const char *tmp = strchr(pos, '/');
        if (!tmp)
        {
            /* Last path component: copy it and stop — the caller resolves it against the proper table. */
            memcpy(key->name, pos, strlen(pos));
            break;
        }

        memcpy(key->name, pos, tmp - pos);

        key->parent = save_fs_list_get_index_from_key(&ctx->directory_table, key, NULL);
        if (key->parent == 0xFFFFFFFF) return false;

        pos = (tmp + 1);
    }

    return true;
}

/* Looks up a file entry by absolute path: resolves the parent chain, then finds the final
 * component in the file table and reads its entry. */
bool save_hierarchical_file_table_get_file_entry_by_path(hierarchical_save_file_table_ctx_t *ctx, const char *path, save_fs_list_entry_t *entry)
{
    if (!ctx || !path || !*path || !entry)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    save_entry_key_t key;
    if (!save_hierarchical_file_table_find_path_recursive(ctx, &key, path))
    {
        LOG_MSG_ERROR("Unable to locate file \"%s\"!", path);
        return false;
    }

    u32 index = save_fs_list_get_index_from_key(&ctx->file_table, &key, NULL);
    if (index == 0xFFFFFFFF)
    {
        LOG_MSG_ERROR("Unable to get table index for file \"%s\"!", path);
        return false;
    }

    if (!save_fs_list_get_value(&ctx->file_table, index, entry))
    {
        LOG_MSG_ERROR("Unable to get file entry for \"%s\" from index!", path);
        return false;
    }

    return true;
}

/* Same as the file variant above, but resolves the final component against the directory table. */
bool save_hierarchical_directory_table_get_file_entry_by_path(hierarchical_save_file_table_ctx_t *ctx, const char *path, save_fs_list_entry_t *entry)
{
    if (!ctx || !path || !*path || !entry)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    save_entry_key_t key;
    if (!save_hierarchical_file_table_find_path_recursive(ctx, &key, path))
    {
        LOG_MSG_ERROR("Unable to locate file \"%s\"!", path);
        return false;
    }

    u32 index = save_fs_list_get_index_from_key(&ctx->directory_table, &key, NULL);
    if (index == 0xFFFFFFFF)
    {
        LOG_MSG_ERROR("Unable to get table index for file \"%s\"!", path);
        return false;
    }

    if (!save_fs_list_get_value(&ctx->directory_table, index, entry))
    {
        LOG_MSG_ERROR("Unable to get file entry for \"%s\" from index!", path);
        return false;
    }

    return true;
}

/* Opens a FAT-backed storage rooted at 'block_index'. A block index of 0xFFFFFFFF denotes an
 * empty storage (length 0); otherwise the chain length is computed from the allocation table. */
bool save_open_fat_storage(save_filesystem_ctx_t *ctx, allocation_table_storage_ctx_t *storage_ctx, u32 block_index)
{
    if (!ctx || !ctx->base_storage || !storage_ctx)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    storage_ctx->base_storage = ctx->base_storage;
    storage_ctx->fat = &ctx->allocation_table;
    storage_ctx->block_size = (u32)ctx->header->block_size;
    storage_ctx->initial_block = block_index;

    if (block_index == 0xFFFFFFFF)
    {
        storage_ctx->_length = 0;
    } else {
        u32 fat_list_length = save_allocation_table_get_list_length(storage_ctx->fat, block_index);
        if (!fat_list_length)
        {
            LOG_MSG_ERROR("Failed to retrieve FAT list length!");
            return false;
        }

        storage_ctx->_length = (fat_list_length * storage_ctx->block_size);
    }

    return true;
}

/* Initializes the save filesystem: binds the allocation table and header, then opens the
 * directory/file table storages at the blocks named in the FAT header. List index 0 is the free
 * list head, index 1 the used list head. */
static bool save_filesystem_init(save_filesystem_ctx_t *ctx, void *fat, save_fs_header_t *save_fs_header, fat_header_t *fat_header)
{
    if (!ctx || !fat || !save_fs_header || !fat_header)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    ctx->allocation_table.base_storage = fat;
    ctx->allocation_table.header = fat_header;
    ctx->allocation_table.free_list_entry_index = 0;
    ctx->header = save_fs_header;

    if (!save_open_fat_storage(ctx, &ctx->file_table.directory_table.storage, fat_header->directory_table_block))
    {
        LOG_MSG_ERROR("Failed to open FAT directory storage!");
        return false;
    }

    if (!save_open_fat_storage(ctx, &ctx->file_table.file_table.storage, fat_header->file_table_block))
    {
        LOG_MSG_ERROR("Failed to open FAT file storage!");
        return false;
    }

    ctx->file_table.file_table.free_list_head_index = 0;
    ctx->file_table.file_table.used_list_head_index = 1;
    ctx->file_table.directory_table.free_list_head_index = 0;
    ctx->file_table.directory_table.used_list_head_index = 1;

    return true;
}

/* Forces a verified read of every unchecked block in each IVFC storage so block validities get
 * populated, returning VALIDITY_INVALID as soon as any block fails.
 * NOTE(review): the skip/invalid checks index level_validities[num_levels - 2] (the data level)
 * while iterating storage 'i' — confirm this is intentional rather than level_validities[i]. */
static validity_t save_ivfc_validate(hierarchical_integrity_verification_storage_ctx_t *ctx, ivfc_save_hdr_t *ivfc)
{
    if (!ctx || !ivfc || !ivfc->num_levels)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return VALIDITY_INVALID;
    }

    validity_t result = VALIDITY_VALID;

    for(u32 i = 0; i < (ivfc->num_levels - 1) && result != VALIDITY_INVALID; i++)
    {
        integrity_verification_storage_ctx_t *storage = &ctx->integrity_storages[i];

        u64 block_size = storage->sector_size;
        u32 block_count = (u32)((storage->_length + block_size - 1) / block_size);

        u8 *buffer = calloc(1, block_size);
        if (!buffer)
        {
            LOG_MSG_ERROR("Failed to allocate memory for input buffer!");
            result = VALIDITY_INVALID;
            break;
        }

        for(u32 j = 0; j < block_count; j++)
        {
            if (ctx->level_validities[ivfc->num_levels - 2][j] == VALIDITY_UNCHECKED)
            {
                u32 to_read = ((storage->_length - (block_size * j)) < block_size ? (storage->_length - (block_size * j)) : block_size);
                if (!save_ivfc_storage_read(storage, buffer, block_size * j, to_read, 1))
                {
                    LOG_MSG_ERROR("Failed to read IVFC storage data!");
                    result = VALIDITY_INVALID;
                    break;
                }
            }

            if (ctx->level_validities[ivfc->num_levels - 2][j] == VALIDITY_INVALID)
            {
                result = VALIDITY_INVALID;
                break;
            }
        }

        free(buffer);

        if (result == VALIDITY_INVALID) break;
    }

    return result;
}

/* Collapses per-block validities into a per-level hash_validity: INVALID if any block is invalid,
 * UNCHECKED if any block remains unchecked, VALID otherwise. Returns false when any level is invalid. */
static bool save_ivfc_set_level_validities(hierarchical_integrity_verification_storage_ctx_t *ctx, ivfc_save_hdr_t *ivfc)
{
    if (!ctx || !ivfc || !ivfc->num_levels)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    bool success = true;

    for(u32 i = 0; i < (ivfc->num_levels - 1); i++)
    {
        validity_t level_validity = VALIDITY_VALID;

        for(u32 j = 0; j < ctx->integrity_storages[i].sector_count; j++)
        {
            if (ctx->level_validities[i][j] == VALIDITY_INVALID)
            {
                level_validity = VALIDITY_INVALID;
                break;
            }

            if (ctx->level_validities[i][j] == VALIDITY_UNCHECKED && level_validity != VALIDITY_INVALID) level_validity = VALIDITY_UNCHECKED;
        }

        ctx->levels[i].hash_validity = level_validity;

        if (success && level_validity == VALIDITY_INVALID) success = false;
    }

    if (!success) LOG_MSG_ERROR("Invalid IVFC level!");

    return success;
}

/* Validates the core data IVFC storage, then (when present, i.e. its level 0 has a save_ctx bound)
 * the FAT IVFC storage, propagating block validities up to the per-level flags.
 * Returns the combined validity; INVALID short-circuits. */
static validity_t save_filesystem_verify(save_ctx_t *ctx)
{
    if (!ctx)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return VALIDITY_INVALID;
    }

    validity_t journal_validity = save_ivfc_validate(&ctx->core_data_ivfc_storage, &ctx->header.data_ivfc_header);
    if (journal_validity == VALIDITY_INVALID)
    {
        LOG_MSG_ERROR("Invalid core IVFC storage!");
        return journal_validity;
    }

    if (!save_ivfc_set_level_validities(&ctx->core_data_ivfc_storage, &ctx->header.data_ivfc_header))
    {
        LOG_MSG_ERROR("Invalid IVFC level in core IVFC storage!");
        journal_validity = VALIDITY_INVALID;
        return journal_validity;
    }

    /* No FAT IVFC storage initialized: report the core result only. */
    if (!ctx->fat_ivfc_storage.levels[0].save_ctx) return journal_validity;

    validity_t fat_validity = save_ivfc_validate(&ctx->fat_ivfc_storage, &ctx->header.fat_ivfc_header);
    if (fat_validity == VALIDITY_INVALID)
    {
        LOG_MSG_ERROR("Invalid FAT IVFC storage!");
        return fat_validity;
    }

    if (!save_ivfc_set_level_validities(&ctx->fat_ivfc_storage, &ctx->header.fat_ivfc_header))
    {
        LOG_MSG_ERROR("Invalid IVFC level in FAT IVFC storage!");
        fat_validity = VALIDITY_INVALID;
        return fat_validity;
    }

    if (journal_validity != VALIDITY_VALID) return journal_validity;
    if (fat_validity != VALIDITY_VALID) return fat_validity;

    return journal_validity;
}

/* Top-level savefile processing: parses the header (falling back from copy A at offset 0 to copy B
 * at 0x4000), checks its AES-CMAC, then builds the storage stack: data remap -> duplex layers ->
 * (continued past this chunk). */
bool save_process(save_ctx_t *ctx)
{
    if (!ctx || !ctx->file)
    {
        LOG_MSG_ERROR("Invalid parameters!");
        return false;
    }

    size_t read_bytes = 0;
    int res = 0;
    bool success = false;

    /* Try to parse Header A. */
    rewind(ctx->file);

    read_bytes = fread(&(ctx->header), 1, sizeof(ctx->header), ctx->file);
    if (read_bytes != sizeof(ctx->header))
    {
        LOG_MSG_ERROR("Failed to read savefile header A! (read 0x%lX, errno %d).", read_bytes, errno);
        return success;
    }

    if (!save_process_header(ctx) || ctx->header_hash_validity == VALIDITY_INVALID)
    {
        /* Try to parse Header B. */
        res = fseek(ctx->file, 0x4000, SEEK_SET);
        if (res || ftell(ctx->file) != 0x4000)
        {
            LOG_MSG_ERROR("Failed to seek to offset 0x4000 in savefile! (%d).", errno);
            return success;
        }

        read_bytes = fread(&(ctx->header), 1, sizeof(ctx->header), ctx->file);
        if (read_bytes != sizeof(ctx->header))
        {
            LOG_MSG_ERROR("Failed to read savefile header B! (read 0x%lX, errno %d).", read_bytes, errno);
            return success;
        }

        if (!save_process_header(ctx) || ctx->header_hash_validity == VALIDITY_INVALID)
        {
            LOG_MSG_ERROR("Savefile header is invalid!");
            return success;
        }
    }

    /* Authenticate the header layout with AES-CMAC using the save MAC key. */
    u8 cmac[0x10] = {0};
    cmacAes128CalculateMac(cmac, ctx->save_mac_key, &ctx->header.layout, sizeof(ctx->header.layout));

    ctx->header_cmac_validity = (!memcmp(cmac, &ctx->header.cmac, 0x10) ? VALIDITY_VALID : VALIDITY_INVALID);

    /* Initialize remap storages. */
    ctx->data_remap_storage.type = STORAGE_BYTES;
    ctx->data_remap_storage.base_storage_offset = ctx->header.layout.file_map_data_offset;
    ctx->data_remap_storage.header = &ctx->header.main_remap_header;
    ctx->data_remap_storage.file = ctx->file;

    ctx->data_remap_storage.map_entries = calloc(ctx->data_remap_storage.header->map_entry_count, sizeof(remap_entry_ctx_t));
    if (!ctx->data_remap_storage.map_entries)
    {
        LOG_MSG_ERROR("Failed to allocate memory for data remap storage entries!");
        return success;
    }

    res = fseek(ctx->file, ctx->header.layout.file_map_entry_offset, SEEK_SET);
    if (res || ftell(ctx->file) != ctx->header.layout.file_map_entry_offset)
    {
        LOG_MSG_ERROR("Failed to seek to file map entry offset 0x%lX in savefile! (%d).", ctx->header.layout.file_map_entry_offset, errno);
        return success;
    }

    /* Each on-disk map entry is 0x20 bytes; derive the physical/virtual end offsets after reading. */
    for(u32 i = 0; i < ctx->data_remap_storage.header->map_entry_count; i++)
    {
        read_bytes = fread(&(ctx->data_remap_storage.map_entries[i]), 1, 0x20, ctx->file);
        if (read_bytes != 0x20)
        {
            LOG_MSG_ERROR("Failed to read data remap storage entry #%u! (read 0x%lX, errno %d).", i, read_bytes, errno);
            goto end;
        }

        ctx->data_remap_storage.map_entries[i].physical_offset_end = (ctx->data_remap_storage.map_entries[i].physical_offset + ctx->data_remap_storage.map_entries[i].size);
        ctx->data_remap_storage.map_entries[i].virtual_offset_end = (ctx->data_remap_storage.map_entries[i].virtual_offset + ctx->data_remap_storage.map_entries[i].size);
    }

    /* Initialize data remap storage. */
    ctx->data_remap_storage.segments = save_remap_init_segments(ctx->data_remap_storage.header, ctx->data_remap_storage.map_entries, ctx->data_remap_storage.header->map_entry_count);
    if (!ctx->data_remap_storage.segments)
    {
        LOG_MSG_ERROR("Failed to retrieve data remap storage segments!");
        goto end;
    }

    /* Initialize duplex storage. Layer 0 lives inside the header itself; layers 1 and 2 are read
     * out of the data remap storage into freshly allocated buffers (both A and B copies). */
    ctx->duplex_layers[0].data_a = ((u8*)&ctx->header + ctx->header.layout.duplex_master_offset_a);
    ctx->duplex_layers[0].data_b = ((u8*)&ctx->header + ctx->header.layout.duplex_master_offset_b);
    memcpy(&ctx->duplex_layers[0].info, &ctx->header.duplex_header.layers[0], sizeof(duplex_info_t));

    ctx->duplex_layers[1].data_a = calloc(1, ctx->header.layout.duplex_l1_size);
    if (!ctx->duplex_layers[1].data_a)
    {
        LOG_MSG_ERROR("Failed to allocate memory for data_a block in duplex layer #1!");
        goto end;
    }

    if (save_remap_read(&ctx->data_remap_storage, ctx->duplex_layers[1].data_a, ctx->header.layout.duplex_l1_offset_a, ctx->header.layout.duplex_l1_size) != ctx->header.layout.duplex_l1_size)
    {
        LOG_MSG_ERROR("Failed to read data_a block from duplex layer #1 in data remap storage!");
        goto end;
    }

    ctx->duplex_layers[1].data_b = calloc(1, ctx->header.layout.duplex_l1_size);
    if (!ctx->duplex_layers[1].data_b)
    {
        LOG_MSG_ERROR("Failed to allocate memory for data_b block in duplex layer #1!");
        goto end;
    }

    if (save_remap_read(&ctx->data_remap_storage, ctx->duplex_layers[1].data_b, ctx->header.layout.duplex_l1_offset_b, ctx->header.layout.duplex_l1_size) != ctx->header.layout.duplex_l1_size)
    {
        LOG_MSG_ERROR("Failed to read data_b block from duplex layer #1 in data remap storage!");
        goto end;
    }

    memcpy(&ctx->duplex_layers[1].info, &ctx->header.duplex_header.layers[1], sizeof(duplex_info_t));

    ctx->duplex_layers[2].data_a = calloc(1, ctx->header.layout.duplex_data_size);
    if (!ctx->duplex_layers[2].data_a)
    {
        LOG_MSG_ERROR("Failed to allocate memory for data_a block in duplex layer #2!");
        goto end;
    }

    if (save_remap_read(&ctx->data_remap_storage, ctx->duplex_layers[2].data_a, ctx->header.layout.duplex_data_offset_a, ctx->header.layout.duplex_data_size) != ctx->header.layout.duplex_data_size)
    {
        LOG_MSG_ERROR("Failed to read data_a block from duplex layer #2 in data remap storage!");
        goto end;
    }

    ctx->duplex_layers[2].data_b = calloc(1, ctx->header.layout.duplex_data_size);
    if (!ctx->duplex_layers[2].data_b)
    {
        LOG_MSG_ERROR("Failed to allocate memory for data_b block in duplex layer #2!");
        goto end;
    }

    if (save_remap_read(&ctx->data_remap_storage, ctx->duplex_layers[2].data_b, ctx->header.layout.duplex_data_offset_b, ctx->header.layout.duplex_data_size) != ctx->header.layout.duplex_data_size)
    {
        LOG_MSG_ERROR("Failed to read data_b block from duplex layer #2 in data remap storage!");
        goto end;
    }

    memcpy(&ctx->duplex_layers[2].info, &ctx->header.duplex_header.layers[2], sizeof(duplex_info_t));

    /* Initialize hierarchical duplex storage. */
    u8 *bitmap = (ctx->header.layout.duplex_index == 1 ? 
ctx->duplex_layers[0].data_b : ctx->duplex_layers[0].data_a); + + if (!save_duplex_storage_init(&ctx->duplex_storage.layers[0], &ctx->duplex_layers[1], bitmap, ctx->header.layout.duplex_master_size)) + { + LOG_MSG_ERROR("Failed to initialize duplex storage layer #0!"); + goto end; + } + + ctx->duplex_storage.layers[0]._length = ctx->header.layout.duplex_l1_size; + + bitmap = calloc(1, ctx->duplex_storage.layers[0]._length); + if (!bitmap) + { + LOG_MSG_ERROR("Failed to allocate memory for duplex storage layer #0 bitmap!"); + goto end; + } + + if (save_duplex_storage_read(&ctx->duplex_storage.layers[0], bitmap, 0, ctx->duplex_storage.layers[0]._length) != ctx->duplex_storage.layers[0]._length) + { + LOG_MSG_ERROR("Failed to read duplex storage layer #0 bitmap!"); + free(bitmap); + goto end; + } + + if (!save_duplex_storage_init(&ctx->duplex_storage.layers[1], &ctx->duplex_layers[2], bitmap, ctx->duplex_storage.layers[0]._length)) + { + LOG_MSG_ERROR("Failed to initialize duplex storage layer #1!"); + goto end; + } + + ctx->duplex_storage.layers[1]._length = ctx->header.layout.duplex_data_size; + + ctx->duplex_storage.data_layer = ctx->duplex_storage.layers[1]; + + /* Initialize meta remap storage. */ + ctx->meta_remap_storage.type = STORAGE_DUPLEX; + ctx->meta_remap_storage.duplex = &ctx->duplex_storage.data_layer; + ctx->meta_remap_storage.header = &ctx->header.meta_remap_header; + ctx->meta_remap_storage.file = ctx->file; + + ctx->meta_remap_storage.map_entries = calloc(ctx->meta_remap_storage.header->map_entry_count, sizeof(remap_entry_ctx_t)); + if (!ctx->meta_remap_storage.map_entries) + { + LOG_MSG_ERROR("Failed to allocate memory for meta remap storage entries!"); + goto end; + } + + res = fseek(ctx->file, ctx->header.layout.meta_map_entry_offset, SEEK_SET); + if (res || ftell(ctx->file) != ctx->header.layout.meta_map_entry_offset) + { + LOG_MSG_ERROR("Failed to seek to meta map entry offset 0x%lX in savefile! 
(%d).", ctx->header.layout.meta_map_entry_offset, errno); + goto end; + } + + for(u32 i = 0; i < ctx->meta_remap_storage.header->map_entry_count; i++) + { + read_bytes = fread(&(ctx->meta_remap_storage.map_entries[i]), 1, 0x20, ctx->file); + if (read_bytes != 0x20) + { + LOG_MSG_ERROR("Failed to read meta remap storage entry #%u! (read 0x%lX, errno %d).", i, read_bytes, errno); + goto end; + } + + ctx->meta_remap_storage.map_entries[i].physical_offset_end = (ctx->meta_remap_storage.map_entries[i].physical_offset + ctx->meta_remap_storage.map_entries[i].size); + ctx->meta_remap_storage.map_entries[i].virtual_offset_end = (ctx->meta_remap_storage.map_entries[i].virtual_offset + ctx->meta_remap_storage.map_entries[i].size); + } + + ctx->meta_remap_storage.segments = save_remap_init_segments(ctx->meta_remap_storage.header, ctx->meta_remap_storage.map_entries, ctx->meta_remap_storage.header->map_entry_count); + if (!ctx->meta_remap_storage.segments) + { + LOG_MSG_ERROR("Failed to retrieve meta remap storage segments!"); + goto end; + } + + /* Initialize journal map. */ + ctx->journal_map_info.map_storage = calloc(1, ctx->header.layout.journal_map_table_size); + if (!ctx->journal_map_info.map_storage) + { + LOG_MSG_ERROR("Failed to allocate memory for journal map info!"); + goto end; + } + + if (save_remap_read(&ctx->meta_remap_storage, ctx->journal_map_info.map_storage, ctx->header.layout.journal_map_table_offset, ctx->header.layout.journal_map_table_size) != ctx->header.layout.journal_map_table_size) + { + LOG_MSG_ERROR("Failed to read map storage from journal map info in meta remap storage!"); + goto end; + } + + /* Initialize journal storage. 
*/ + ctx->journal_storage.header = &ctx->header.journal_header; + ctx->journal_storage.journal_data_offset = ctx->header.layout.journal_data_offset; + ctx->journal_storage._length = (ctx->journal_storage.header->total_size - ctx->journal_storage.header->journal_size); + ctx->journal_storage.file = ctx->file; + ctx->journal_storage.map.header = &ctx->header.map_header; + ctx->journal_storage.map.map_storage = ctx->journal_map_info.map_storage; + + ctx->journal_storage.map.entries = calloc(ctx->journal_storage.map.header->main_data_block_count, sizeof(journal_map_entry_t)); + if (!ctx->journal_storage.map.entries) + { + LOG_MSG_ERROR("Failed to allocate memory for journal map storage entries!"); + goto end; + } + + u32 *pos = (u32*)ctx->journal_storage.map.map_storage; + + for(u32 i = 0; i < ctx->journal_storage.map.header->main_data_block_count; i++) + { + ctx->journal_storage.map.entries[i].virtual_index = i; + ctx->journal_storage.map.entries[i].physical_index = (*pos & 0x7FFFFFFF); + pos += 2; + } + + ctx->journal_storage.block_size = ctx->journal_storage.header->block_size; + ctx->journal_storage._length = (ctx->journal_storage.header->total_size - ctx->journal_storage.header->journal_size); + + /* Initialize core IVFC storage. */ + for(u32 i = 0; i < 5; i++) ctx->core_data_ivfc_storage.levels[i].save_ctx = ctx; + + if (!save_ivfc_storage_init(&ctx->core_data_ivfc_storage, ctx->header.layout.ivfc_master_hash_offset_a, &ctx->header.data_ivfc_header)) + { + LOG_MSG_ERROR("Failed to initialize core IVFC storage!"); + goto end; + } + + /* Initialize FAT storage. 
*/ + if (ctx->header.layout.version < 0x50000) + { + ctx->fat_storage = calloc(1, ctx->header.layout.fat_size); + if (!ctx->fat_storage) + { + LOG_MSG_ERROR("Failed to allocate memory for FAT storage!"); + goto end; + } + + if (save_remap_read(&ctx->meta_remap_storage, ctx->fat_storage, ctx->header.layout.fat_offset, ctx->header.layout.fat_size) != ctx->header.layout.fat_size) + { + LOG_MSG_ERROR("Failed to read FAT storage from meta remap storage!"); + goto end; + } + } else { + for(u32 i = 0; i < 5; i++) ctx->fat_ivfc_storage.levels[i].save_ctx = ctx; + + if (!save_ivfc_storage_init(&ctx->fat_ivfc_storage, ctx->header.layout.fat_ivfc_master_hash_a, &ctx->header.fat_ivfc_header)) + { + LOG_MSG_ERROR("Failed to initialize FAT storage! (IVFC)."); + goto end; + } + + ctx->fat_storage = calloc(1, ctx->fat_ivfc_storage._length); + if (!ctx->fat_storage) + { + LOG_MSG_ERROR("Failed to allocate memory for FAT storage! (IVFC)."); + goto end; + } + + if (save_remap_read(&ctx->meta_remap_storage, ctx->fat_storage, ctx->header.fat_ivfc_header.level_headers[ctx->header.fat_ivfc_header.num_levels - 2].logical_offset, ctx->fat_ivfc_storage._length) != ctx->fat_ivfc_storage._length) + { + LOG_MSG_ERROR("Failed to read FAT storage from meta remap storage! (IVFC)."); + goto end; + } + } + + if (ctx->tool_ctx.action & ACTION_VERIFY) + { + if (save_filesystem_verify(ctx) == VALIDITY_INVALID) + { + LOG_MSG_ERROR("Savefile FS verification failed!"); + goto end; + } + } + + /* Initialize core save filesystem. 
*/ + ctx->save_filesystem_core.base_storage = &ctx->core_data_ivfc_storage; + if (!save_filesystem_init(&ctx->save_filesystem_core, ctx->fat_storage, &ctx->header.save_header, &ctx->header.fat_header)) + { + LOG_MSG_ERROR("Failed to initialize savefile FS!"); + goto end; + } + + success = true; + +end: + if (!success) save_free_contexts(ctx); + + return success; +} + +bool save_process_header(save_ctx_t *ctx) +{ + if (!ctx) + { + LOG_MSG_ERROR("Invalid parameters!"); + return false; + } + + if (ctx->header.layout.magic != MAGIC_DISF || ctx->header.duplex_header.magic != MAGIC_DPFS || \ + ctx->header.data_ivfc_header.magic != MAGIC_IVFC || ctx->header.journal_header.magic != MAGIC_JNGL || \ + ctx->header.save_header.magic != MAGIC_SAVE || ctx->header.main_remap_header.magic != MAGIC_RMAP || \ + ctx->header.meta_remap_header.magic != MAGIC_RMAP) + { + LOG_MSG_ERROR("Save header is corrupt!"); + return false; + } + + ctx->data_ivfc_master = ((u8*)&ctx->header + ctx->header.layout.ivfc_master_hash_offset_a); + ctx->fat_ivfc_master = ((u8*)&ctx->header + ctx->header.layout.fat_ivfc_master_hash_a); + + u8 hash[SHA256_HASH_SIZE]; + sha256CalculateHash(hash, &ctx->header.duplex_header, 0x3D00); + + ctx->header_hash_validity = (memcmp(hash, ctx->header.layout.hash, SHA256_HASH_SIZE) == 0 ? 
VALIDITY_VALID : VALIDITY_INVALID); + + ctx->header.data_ivfc_header.num_levels = 5; + + if (ctx->header.layout.version >= 0x50000) ctx->header.fat_ivfc_header.num_levels = 4; + + return true; +} + +void save_free_contexts(save_ctx_t *ctx) +{ + if (!ctx) return; + + if (ctx->data_remap_storage.segments) + { + if (ctx->data_remap_storage.header) + { + for(u32 i = 0; i < ctx->data_remap_storage.header->map_segment_count; i++) + { + if (ctx->data_remap_storage.segments[i].entries) free(ctx->data_remap_storage.segments[i].entries); + } + } + + free(ctx->data_remap_storage.segments); + ctx->data_remap_storage.segments = NULL; + } + + if (ctx->data_remap_storage.map_entries) + { + free(ctx->data_remap_storage.map_entries); + ctx->data_remap_storage.map_entries = NULL; + } + + if (ctx->meta_remap_storage.segments) + { + if (ctx->meta_remap_storage.header) + { + for(u32 i = 0; i < ctx->meta_remap_storage.header->map_segment_count; i++) + { + if (ctx->meta_remap_storage.segments[i].entries) free(ctx->meta_remap_storage.segments[i].entries); + } + } + + free(ctx->meta_remap_storage.segments); + ctx->meta_remap_storage.segments = NULL; + } + + if (ctx->meta_remap_storage.map_entries) + { + free(ctx->meta_remap_storage.map_entries); + ctx->meta_remap_storage.map_entries = NULL; + } + + if (ctx->duplex_storage.layers[0].bitmap.bitmap) + { + free(ctx->duplex_storage.layers[0].bitmap.bitmap); + ctx->duplex_storage.layers[0].bitmap.bitmap = NULL; + } + + if (ctx->duplex_storage.layers[1].bitmap.bitmap) + { + free(ctx->duplex_storage.layers[1].bitmap.bitmap); + ctx->duplex_storage.layers[1].bitmap.bitmap = NULL; + } + + if (ctx->duplex_storage.layers[1].bitmap_storage) + { + free(ctx->duplex_storage.layers[1].bitmap_storage); + ctx->duplex_storage.layers[1].bitmap_storage = NULL; + } + + for(u32 i = 1; i < 3; i++) + { + if (ctx->duplex_layers[i].data_a) + { + free(ctx->duplex_layers[i].data_a); + ctx->duplex_layers[i].data_a = NULL; + } + + if (ctx->duplex_layers[i].data_b) + { + 
free(ctx->duplex_layers[i].data_b); + ctx->duplex_layers[i].data_b = NULL; + } + } + + if (ctx->journal_map_info.map_storage) + { + free(ctx->journal_map_info.map_storage); + ctx->journal_map_info.map_storage = NULL; + } + + if (ctx->journal_storage.map.entries) + { + free(ctx->journal_storage.map.entries); + ctx->journal_storage.map.entries = NULL; + } + + for(u32 i = 0; i < ctx->header.data_ivfc_header.num_levels - 1; i++) + { + if (ctx->core_data_ivfc_storage.integrity_storages[i].block_validities) + { + free(ctx->core_data_ivfc_storage.integrity_storages[i].block_validities); + ctx->core_data_ivfc_storage.integrity_storages[i].block_validities = NULL; + } + } + + if (ctx->core_data_ivfc_storage.level_validities) + { + free(ctx->core_data_ivfc_storage.level_validities); + ctx->core_data_ivfc_storage.level_validities = NULL; + } + + if (ctx->header.layout.version >= 0x50000) + { + for(u32 i = 0; i < ctx->header.fat_ivfc_header.num_levels - 1; i++) + { + if (ctx->fat_ivfc_storage.integrity_storages[i].block_validities) + { + free(ctx->fat_ivfc_storage.integrity_storages[i].block_validities); + ctx->fat_ivfc_storage.integrity_storages[i].block_validities = NULL; + } + } + } + + if (ctx->fat_ivfc_storage.level_validities) + { + free(ctx->fat_ivfc_storage.level_validities); + ctx->fat_ivfc_storage.level_validities = NULL; + } + + if (ctx->fat_storage) + { + free(ctx->fat_storage); + ctx->fat_storage = NULL; + } +} + +save_ctx_t *save_open_savefile(const char *path, u32 action) +{ + if (!path || !*path) + { + LOG_MSG_ERROR("Invalid savefile path!"); + return NULL; + } + + FILE *save_fp = NULL; + save_ctx_t *save_ctx = NULL; + bool success = false; + + save_fp = fopen(path, "rb"); + if (!save_fp) + { + LOG_MSG_ERROR("Failed to open savefile \"%s\"! 
(%d).", path, errno); + goto end; + } + + save_ctx = calloc(1, sizeof(save_ctx_t)); + if (!save_ctx) + { + LOG_MSG_ERROR("Unable to allocate memory for savefile \"%s\" context!", path); + goto end; + } + + save_ctx->file = save_fp; + save_ctx->tool_ctx.action = action; + + success = save_process(save_ctx); + if (!success) LOG_MSG_ERROR("Failed to process savefile \"%s\"!", path); + +end: + if (!success) + { + if (save_ctx) + { + free(save_ctx); + save_ctx = NULL; + } + + if (save_fp) fclose(save_fp); + } + + return save_ctx; +} + +void save_close_savefile(save_ctx_t **ctx) +{ + if (!ctx || !*ctx) return; + + if ((*ctx)->file) fclose((*ctx)->file); + + save_free_contexts(*ctx); + + free(*ctx); + *ctx = NULL; +} + +bool save_get_fat_storage_from_file_entry_by_path(save_ctx_t *ctx, const char *path, allocation_table_storage_ctx_t *out_fat_storage, u64 *out_file_entry_size) +{ + if (!ctx || !path || !*path || !out_fat_storage || !out_file_entry_size) + { + LOG_MSG_ERROR("Invalid file entry path!"); + return false; + } + + save_fs_list_entry_t entry = {0}; + + if (!save_hierarchical_file_table_get_file_entry_by_path(&(ctx->save_filesystem_core.file_table), path, &entry)) + { + LOG_MSG_ERROR("Failed to get file entry for \"%s\" in savefile!", path); + return false; + } + + if (!save_open_fat_storage(&(ctx->save_filesystem_core), out_fat_storage, entry.value.save_file_info.start_block)) + { + LOG_MSG_ERROR("Failed to open FAT storage at block 0x%X for \"%s\" in savefile!", entry.value.save_file_info.start_block, path); + return false; + } + + *out_file_entry_size = entry.value.save_file_info.length; + + return true; +}