fix ncz block installs, fix error module value being out of range, display error on install from filebrowser.
The issue with block installs was that I was not tracking the NCZ block offset between transfers. This resulted in the block size being used for each transfer, rather than size - offset. For blocks that were always compressed, this silently worked, as a zstd stream can handle multiple frames. However, if there existed both compressed and uncompressed blocks, then this bug would be exposed. Thanks to Marulv for reporting the bug.
This commit is contained in:
@@ -7,7 +7,7 @@
|
||||
|
||||
namespace sphaira::es {
|
||||
|
||||
enum { TicketModule = 522 };
|
||||
enum { TicketModule = 507 };
|
||||
|
||||
enum : Result {
|
||||
// found ticket has mismatching rights_id from its name.
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
namespace sphaira::yati::source {
|
||||
|
||||
struct Usb final : Base {
|
||||
enum { USBModule = 523 };
|
||||
enum { USBModule = 508 };
|
||||
|
||||
enum : Result {
|
||||
Result_BadMagic = MAKERESULT(USBModule, 0),
|
||||
|
||||
@@ -16,8 +16,18 @@
|
||||
|
||||
namespace sphaira::yati {
|
||||
|
||||
enum { YatiModule = 521 };
|
||||
enum { YatiModule = 506 };
|
||||
|
||||
/*
|
||||
Improving compression ratio via block splitting is now enabled by default for high compression levels (16+).
|
||||
The amount of benefit varies depending on the workload.
|
||||
Compressing archives comprised of heavily differing files will see more improvement than compression of single files that don’t
|
||||
vary much entropically (like text files/enwik). At levels 16+, we observe no measurable regression to compression speed.
|
||||
|
||||
The block splitter can be forcibly enabled on lower compression levels as well with the advanced parameter ZSTD_c_splitBlocks.
|
||||
When forcibly enabled at lower levels, speed regressions can become more notable.
|
||||
Additionally, since more compressed blocks may be produced, decompression speed on these blobs may also see small regressions.
|
||||
*/
|
||||
enum : Result {
|
||||
// unknown container for the source provided.
|
||||
Result_ContainerNotFound = MAKERESULT(YatiModule, 10),
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
namespace sphaira::ui {
|
||||
|
||||
ErrorBox::ErrorBox(const std::string& message) : m_message{message} {
|
||||
log_write("[ERROR] %s\n", m_message.c_str());
|
||||
|
||||
m_pos.w = 770.f;
|
||||
m_pos.h = 430.f;
|
||||
@@ -21,6 +22,7 @@ ErrorBox::ErrorBox(const std::string& message) : m_message{message} {
|
||||
|
||||
ErrorBox::ErrorBox(Result code, const std::string& message) : ErrorBox{message} {
|
||||
m_code = code;
|
||||
log_write("[ERROR] Code: 0x%X Module: %u Description: %u\n", R_VALUE(code), R_MODULE(code), R_DESCRIPTION(code));
|
||||
}
|
||||
|
||||
auto ErrorBox::Update(Controller* controller, TouchInfo* touch) -> void {
|
||||
|
||||
@@ -694,6 +694,8 @@ void FsView::InstallFiles() {
|
||||
}
|
||||
|
||||
R_SUCCEED();
|
||||
}, [this](Result rc){
|
||||
App::PushErrorBox(rc, "File install failed!"_i18n);
|
||||
}));
|
||||
}
|
||||
}));
|
||||
|
||||
@@ -433,7 +433,7 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
inflate_buf.reserve(t->max_buffer_size);
|
||||
|
||||
s64 written{};
|
||||
s64 decompress_buf_off{};
|
||||
s64 block_offset{};
|
||||
std::vector<u8> buf{};
|
||||
buf.reserve(t->max_buffer_size);
|
||||
|
||||
@@ -454,14 +454,15 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
}
|
||||
|
||||
for (s64 off = 0; off < size;) {
|
||||
// log_write("looking for section\n");
|
||||
if (!ncz_section || !ncz_section->InRange(written)) {
|
||||
log_write("[NCZ] looking for new section: %zu\n", written);
|
||||
auto it = std::ranges::find_if(t->ncz_sections, [written](auto& e){
|
||||
return e.InRange(written);
|
||||
});
|
||||
|
||||
R_UNLESS(it != t->ncz_sections.cend(), Result_NczSectionNotFound);
|
||||
ncz_section = &(*it);
|
||||
log_write("[NCZ] found new section: %zu\n", written);
|
||||
|
||||
if (ncz_section->crypto_type >= nca::EncryptionType_AesCtr) {
|
||||
const auto swp = std::byteswap(u64(written) >> 4);
|
||||
@@ -488,7 +489,7 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
|
||||
// restore remaining data to the swapped buffer.
|
||||
if (!temp_vector.empty()) {
|
||||
log_write("storing data size: %zu\n", temp_vector.size());
|
||||
log_write("[NCZ] storing data size: %zu\n", temp_vector.size());
|
||||
inflate_buf = temp_vector;
|
||||
}
|
||||
|
||||
@@ -496,6 +497,7 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
};
|
||||
|
||||
while (t->decompress_offset < t->write_size && R_SUCCEEDED(t->GetResults())) {
|
||||
s64 decompress_buf_off{};
|
||||
R_TRY(t->GetDecompressBuf(buf, decompress_buf_off));
|
||||
|
||||
// do we have an nsz? if so, setup buffers.
|
||||
@@ -616,12 +618,14 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
// todo: blocks need to use read offset, as the offset + size is compressed range.
|
||||
if (t->ncz_blocks.size()) {
|
||||
if (!ncz_block || !ncz_block->InRange(decompress_buf_off)) {
|
||||
block_offset = 0;
|
||||
log_write("[NCZ] looking for new block: %zu\n", decompress_buf_off);
|
||||
auto it = std::ranges::find_if(t->ncz_blocks, [decompress_buf_off](auto& e){
|
||||
return e.InRange(decompress_buf_off);
|
||||
});
|
||||
|
||||
R_UNLESS(it != t->ncz_blocks.cend(), Result_NczBlockNotFound);
|
||||
// log_write("looking found block\n");
|
||||
log_write("[NCZ] found new block: %zu off: %zd size: %zd\n", decompress_buf_off, it->offset, it->size);
|
||||
ncz_block = &(*it);
|
||||
}
|
||||
|
||||
@@ -629,7 +633,7 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
auto decompressedBlockSize = 1 << t->ncz_block_header.block_size_exponent;
|
||||
// special handling for the last block to check it's actually compressed
|
||||
if (ncz_block->offset == t->ncz_blocks.back().offset) {
|
||||
log_write("last block special handling\n");
|
||||
log_write("[NCZ] last block special handling\n");
|
||||
decompressedBlockSize = t->ncz_block_header.decompressed_size % decompressedBlockSize;
|
||||
}
|
||||
|
||||
@@ -637,12 +641,12 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
compressed = ncz_block->size < decompressedBlockSize;
|
||||
|
||||
// clip read size as blocks can be up to 32GB in size!
|
||||
const auto size = std::min<u64>(buf.size() - buf_off, ncz_block->size);
|
||||
buffer = {buf.data() + buf_off, size};
|
||||
const auto size = std::min<u64>(buffer.size(), ncz_block->size - block_offset);
|
||||
buffer = buffer.subspan(0, size);
|
||||
}
|
||||
|
||||
if (compressed) {
|
||||
// log_write("COMPRESSED block\n");
|
||||
log_write("[NCZ] COMPRESSED block\n");
|
||||
ZSTD_inBuffer input = { buffer.data(), buffer.size(), 0 };
|
||||
while (input.pos < input.size) {
|
||||
R_TRY(t->GetResults());
|
||||
@@ -650,12 +654,15 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
inflate_buf.resize(inflate_offset + chunk_size);
|
||||
ZSTD_outBuffer output = { inflate_buf.data() + inflate_offset, chunk_size, 0 };
|
||||
const auto res = ZSTD_decompressStream(dctx, std::addressof(output), std::addressof(input));
|
||||
if (ZSTD_isError(res)) {
|
||||
log_write("[NCZ] ZSTD_decompressStream() pos: %zu size: %zu res: %zd msg: %s\n", input.pos, input.size, res, ZSTD_getErrorName(res));
|
||||
}
|
||||
R_UNLESS(!ZSTD_isError(res), Result_InvalidNczZstdError);
|
||||
|
||||
t->decompress_offset += output.pos;
|
||||
inflate_offset += output.pos;
|
||||
if (inflate_offset >= INFLATE_BUFFER_MAX) {
|
||||
// log_write("flushing compressed data: %zd vs %zd diff: %zd\n", inflate_offset, INFLATE_BUFFER_MAX, inflate_offset - INFLATE_BUFFER_MAX);
|
||||
log_write("[NCZ] flushing compressed data: %zd vs %zd diff: %zd\n", inflate_offset, INFLATE_BUFFER_MAX, inflate_offset - INFLATE_BUFFER_MAX);
|
||||
R_TRY(ncz_flush(INFLATE_BUFFER_MAX));
|
||||
}
|
||||
}
|
||||
@@ -666,13 +673,14 @@ Result Yati::decompressFuncInternal(ThreadData* t) {
|
||||
t->decompress_offset += buffer.size();
|
||||
inflate_offset += buffer.size();
|
||||
if (inflate_offset >= INFLATE_BUFFER_MAX) {
|
||||
// log_write("flushing copy data\n");
|
||||
log_write("[NCZ] flushing copy data\n");
|
||||
R_TRY(ncz_flush(INFLATE_BUFFER_MAX));
|
||||
}
|
||||
}
|
||||
|
||||
buf_off += buffer.size();
|
||||
decompress_buf_off += buffer.size();
|
||||
block_offset += buffer.size();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user