Multi-thread game dumps and file uploads.

Previous uploads were all single-threaded, which meant that they only uploaded as fast as the slowest source.
USB transfer is still single-threaded due to it being random access for both files and data, making it
hard for the read thread to run freely.
This commit is contained in:
ITotalJustice
2025-05-20 22:50:05 +01:00
parent f956adabc3
commit ef25c3edc7
9 changed files with 555 additions and 146 deletions

View File

@@ -19,6 +19,7 @@
#include "swkbd.hpp"
#include "i18n.hpp"
#include "location.hpp"
#include "threaded_file_transfer.hpp"
#include "yati/yati.hpp"
#include "yati/source/file.hpp"
@@ -1200,36 +1201,66 @@ void Menu::UploadFiles() {
App::Push(std::make_shared<ProgressBox>(0, "Uploading"_i18n, "", [this, loc](auto pbox) -> bool {
auto targets = GetSelectedEntries();
const auto file_add = [&](const fs::FsPath& file_path, const char* name){
const auto file_add = [&](s64 file_size, const fs::FsPath& file_path, const char* name) -> Result {
// the file name needs to be relative to the current directory.
const auto relative_file_name = file_path.s + std::strlen(m_path);
pbox->SetTitle(name);
pbox->NewTransfer(relative_file_name);
const auto result = curl::Api().FromFile(
CURL_LOCATION_TO_API(loc),
curl::Path{file_path},
curl::OnProgress{pbox->OnDownloadProgressCallback()},
curl::UploadInfo{relative_file_name}
);
FsFile file;
R_TRY(m_fs->OpenFile(file_path, FsOpenMode_Read, &file));
ON_SCOPE_EXIT(fsFileClose(&file));
return result.success;
return thread::TransferPull(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return fsFileRead(&file, off, data, size, FsReadOption_None, bytes_read);
},
[&](thread::PullFunctionCallback pull) -> Result {
s64 offset{};
const auto result = curl::Api().FromMemory(
CURL_LOCATION_TO_API(loc),
curl::OnProgress{pbox->OnDownloadProgressCallback()},
curl::UploadInfo{
relative_file_name, file_size,
[&](void *ptr, size_t size) -> size_t {
// curl will request past the size of the file, causing an error.
if (offset >= file_size) {
log_write("finished file upload\n");
return 0;
}
u64 bytes_read{};
if (R_FAILED(pull(ptr, size, &bytes_read))) {
log_write("failed to read in custom callback: %zd size: %zd\n", offset, size);
return 0;
}
offset += bytes_read;
return bytes_read;
}
}
);
R_UNLESS(result.success, 0x1);
R_SUCCEED();
}
);
};
for (auto& e : targets) {
if (e.IsFile()) {
const auto file_path = GetNewPath(e);
if (!file_add(file_path, e.GetName().c_str())) {
if (R_FAILED(file_add(e.file_size, file_path, e.GetName().c_str()))) {
return false;
}
} else {
FsDirCollections collections;
get_collections(GetNewPath(e), e.name, collections);
get_collections(GetNewPath(e), e.name, collections, true);
for (const auto& collection : collections) {
for (const auto& file : collection.files) {
const auto file_path = fs::AppendPath(collection.path, file.name);
if (!file_add(file_path, file.name)) {
if (R_FAILED(file_add(file.file_size, file_path, file.name))) {
return false;
}
}
@@ -1887,10 +1918,10 @@ auto Menu::get_collection(const fs::FsPath& path, const fs::FsPath& parent_name,
R_SUCCEED();
}
auto Menu::get_collections(const fs::FsPath& path, const fs::FsPath& parent_name, FsDirCollections& out) -> Result {
auto Menu::get_collections(const fs::FsPath& path, const fs::FsPath& parent_name, FsDirCollections& out, bool inc_size) -> Result {
// get a list of all the files / dirs
FsDirCollection collection;
R_TRY(get_collection(path, parent_name, collection, true, true, false));
R_TRY(get_collection(path, parent_name, collection, true, true, inc_size));
log_write("got collection: %s parent_name: %s files: %zu dirs: %zu\n", path.s, parent_name.s, collection.files.size(), collection.dirs.size());
out.emplace_back(collection);
@@ -1900,7 +1931,7 @@ auto Menu::get_collections(const fs::FsPath& path, const fs::FsPath& parent_name
const auto new_path = std::make_unique<fs::FsPath>(Menu::GetNewPath(path, p.name));
const auto new_parent_name = std::make_unique<fs::FsPath>(Menu::GetNewPath(parent_name, p.name));
log_write("trying to get nested collection: %s parent_name: %s\n", new_path->s, new_parent_name->s);
R_TRY(get_collections(*new_path, *new_parent_name, out));
R_TRY(get_collections(*new_path, *new_parent_name, out, inc_size));
}
R_SUCCEED();

View File

@@ -5,6 +5,7 @@
#include "defines.hpp"
#include "i18n.hpp"
#include "location.hpp"
#include "threaded_file_transfer.hpp"
#include "ui/menus/game_menu.hpp"
#include "ui/sidebar.hpp"
@@ -291,15 +292,17 @@ Result DumpNspToFile(ProgressBox* pbox, std::span<NspEntry> entries) {
auto source = std::make_unique<NspSource>(entries);
for (const auto& e : entries) {
auto path = e.path;
const auto file_size = e.nsp_size;
pbox->SetTitle(e.application_name);
pbox->NewTransfer(e.path);
pbox->NewTransfer(path);
const auto temp_path = fs::AppendPath(DUMP_PATH, e.path + ".temp");
const auto temp_path = fs::AppendPath(DUMP_PATH, path + ".temp");
fs.CreateDirectoryRecursivelyWithPath(temp_path);
fs.DeleteFile(temp_path);
const auto flags = e.nsp_size >= BIG_FILE_SIZE ? FsCreateOption_BigFile : 0;
R_TRY(fs.CreateFile(temp_path, e.nsp_size, flags));
const auto flags = file_size >= BIG_FILE_SIZE ? FsCreateOption_BigFile : 0;
R_TRY(fs.CreateFile(temp_path, file_size, flags));
ON_SCOPE_EXIT(fs.DeleteFile(temp_path));
{
@@ -307,27 +310,17 @@ Result DumpNspToFile(ProgressBox* pbox, std::span<NspEntry> entries) {
R_TRY(fs.OpenFile(temp_path, FsOpenMode_Write, &file));
ON_SCOPE_EXIT(fsFileClose(&file));
s64 offset{};
std::vector<u8> buf(1024*1024*4); // 4MiB
while (offset < e.nsp_size) {
if (pbox->ShouldExit()) {
R_THROW(0xFFFF);
R_TRY(thread::Transfer(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return source->Read(path, data, off, size, bytes_read);
},
[&](const void* data, s64 off, s64 size) -> Result {
return fsFileWrite(&file, off, data, size, FsWriteOption_None);
}
u64 bytes_read;
R_TRY(source->Read(e.path, buf.data(), offset, buf.size(), &bytes_read));
pbox->Yield();
R_TRY(fsFileWrite(&file, offset, buf.data(), bytes_read, FsWriteOption_None));
pbox->Yield();
pbox->UpdateTransfer(offset, e.nsp_size);
offset += bytes_read;
}
));
}
const auto path = fs::AppendPath(DUMP_PATH, e.path);
path = fs::AppendPath(DUMP_PATH, path);
fs.DeleteFile(path);
R_TRY(fs.RenameFile(temp_path, path));
}
@@ -373,24 +366,19 @@ Result DumpNspToUsbS2S(ProgressBox* pbox, std::span<NspEntry> entries) {
Result DumpNspToDevNull(ProgressBox* pbox, std::span<NspEntry> entries) {
auto source = std::make_unique<NspSource>(entries);
for (const auto& e : entries) {
const auto path = e.path;
const auto file_size = e.nsp_size;
pbox->SetTitle(e.application_name);
pbox->NewTransfer(e.path);
pbox->NewTransfer(path);
s64 offset{};
std::vector<u8> buf(1024*1024*4); // 4MiB
while (offset < e.nsp_size) {
if (pbox->ShouldExit()) {
R_THROW(0xFFFF);
R_TRY(thread::Transfer(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return source->Read(path, data, off, size, bytes_read);
},
[&](const void* data, s64 off, s64 size) -> Result {
R_SUCCEED();
}
u64 bytes_read;
R_TRY(source->Read(e.path, buf.data(), offset, buf.size(), &bytes_read));
pbox->Yield();
pbox->UpdateTransfer(offset, e.nsp_size);
offset += bytes_read;
}
));
}
R_SUCCEED();
@@ -403,39 +391,45 @@ Result DumpNspToNetwork(ProgressBox* pbox, const location::Entry& loc, std::span
R_THROW(0xFFFF);
}
const auto path = e.path;
const auto file_size = e.nsp_size;
pbox->SetTitle(e.application_name);
pbox->NewTransfer(e.path);
pbox->NewTransfer(path);
s64 offset{};
const auto result = curl::Api().FromMemory(
CURL_LOCATION_TO_API(loc),
curl::OnProgress{pbox->OnDownloadProgressCallback()},
curl::UploadInfo{
e.path, e.nsp_size,
[&pbox, &e, &source, &offset](void *ptr, size_t size) -> size_t {
u64 bytes_read{};
if (R_FAILED(source->Read(e.path, ptr, offset, size, &bytes_read))) {
// curl will request past the size of the file, causing an error.
// only log the error if it failed in the middle of a transfer.
if (offset != e.nsp_size) {
log_write("failed to read in custom callback: %zd size: %zd\n", offset, e.nsp_size);
}
return 0;
}
offset += bytes_read;
return bytes_read;
}
R_TRY(thread::TransferPull(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return source->Read(path, data, off, size, bytes_read);
},
curl::OnUploadSeek{
[&e, &offset](s64 new_offset){
offset = new_offset;
return true;
}
}
);
[&](thread::PullFunctionCallback pull) -> Result {
s64 offset{};
const auto result = curl::Api().FromMemory(
CURL_LOCATION_TO_API(loc),
curl::OnProgress{pbox->OnDownloadProgressCallback()},
curl::UploadInfo{
path, file_size,
[&](void *ptr, size_t size) -> size_t {
// curl will request past the size of the file, causing an error.
if (offset >= file_size) {
log_write("finished file upload\n");
return 0;
}
R_UNLESS(result.success, 0x1);
u64 bytes_read{};
if (R_FAILED(pull(ptr, size, &bytes_read))) {
log_write("failed to read in custom callback: %zd size: %zd\n", offset, size);
return 0;
}
offset += bytes_read;
return bytes_read;
}
}
);
R_UNLESS(result.success, 0x1);
R_SUCCEED();
}
));
}
R_SUCCEED();

View File

@@ -13,6 +13,7 @@
#include "i18n.hpp"
#include "download.hpp"
#include "location.hpp"
#include "threaded_file_transfer.hpp"
#include <cstring>
#include <algorithm>
@@ -285,6 +286,7 @@ Result DumpNspToFile(ProgressBox* pbox, std::span<const fs::FsPath> paths, XciEn
R_TRY(fs.GetFsOpenResult());
for (auto path : paths) {
const auto file_size = e.GetSize(path);
pbox->SetTitle(e.application_name);
pbox->NewTransfer(path);
@@ -292,9 +294,8 @@ Result DumpNspToFile(ProgressBox* pbox, std::span<const fs::FsPath> paths, XciEn
fs.CreateDirectoryRecursivelyWithPath(temp_path);
fs.DeleteFile(temp_path);
const auto size = e.GetSize(path);
const auto flags = size >= BIG_FILE_SIZE ? FsCreateOption_BigFile : 0;
R_TRY(fs.CreateFile(temp_path, size, flags));
const auto flags = file_size >= BIG_FILE_SIZE ? FsCreateOption_BigFile : 0;
R_TRY(fs.CreateFile(temp_path, file_size, flags));
ON_SCOPE_EXIT(fs.DeleteFile(temp_path));
{
@@ -302,24 +303,14 @@ Result DumpNspToFile(ProgressBox* pbox, std::span<const fs::FsPath> paths, XciEn
R_TRY(fs.OpenFile(temp_path, FsOpenMode_Write, &file));
ON_SCOPE_EXIT(fsFileClose(&file));
s64 offset{};
std::vector<u8> buf(1024*1024*4); // 4MiB
while (offset < size) {
if (pbox->ShouldExit()) {
R_THROW(0xFFFF);
R_TRY(thread::Transfer(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return e.Read(path, data, off, size, bytes_read);
},
[&](const void* data, s64 off, s64 size) -> Result {
return fsFileWrite(&file, off, data, size, FsWriteOption_None);
}
u64 bytes_read;
R_TRY(e.Read(path, buf.data(), offset, buf.size(), &bytes_read));
pbox->Yield();
R_TRY(fsFileWrite(&file, offset, buf.data(), bytes_read, FsWriteOption_None));
pbox->Yield();
pbox->UpdateTransfer(offset, size);
offset += bytes_read;
}
));
}
path = fs::AppendPath(DUMP_PATH, path);
@@ -367,25 +358,18 @@ Result DumpNspToUsbS2S(ProgressBox* pbox, std::span<const fs::FsPath> paths, Xci
Result DumpNspToDevNull(ProgressBox* pbox, std::span<const fs::FsPath> paths, XciEntry& e) {
for (const auto& path : paths) {
const auto file_size = e.GetSize(path);
pbox->SetTitle(e.application_name);
pbox->NewTransfer(path);
s64 offset{};
const auto size = e.GetSize(path);
std::vector<u8> buf(1024*1024*4); // 4MiB
while (offset < size) {
if (pbox->ShouldExit()) {
R_THROW(0xFFFF);
R_TRY(thread::Transfer(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return e.Read(path, data, off, size, bytes_read);
},
[&](const void* data, s64 off, s64 size) -> Result {
R_SUCCEED();
}
u64 bytes_read;
R_TRY(e.Read(path, buf.data(), offset, buf.size(), &bytes_read));
pbox->Yield();
pbox->UpdateTransfer(offset, size);
offset += bytes_read;
}
));
}
R_SUCCEED();
@@ -397,41 +381,45 @@ Result DumpNspToNetwork(ProgressBox* pbox, const location::Entry& loc, std::span
R_THROW(0xFFFF);
}
const auto file_size = e.GetSize(path);
pbox->SetTitle(e.application_name);
pbox->NewTransfer(path);
s64 offset{};
const auto size = e.GetSize(path);
const auto result = curl::Api().FromMemory(
CURL_LOCATION_TO_API(loc),
curl::OnProgress{pbox->OnDownloadProgressCallback()},
curl::UploadInfo{
path, size,
[&pbox, &e, &offset, &path](void *ptr, size_t size) -> size_t {
u64 bytes_read{};
if (R_FAILED(e.Read(path, ptr, offset, size, &bytes_read))) {
// curl will request past the size of the file, causing an error.
// only log the error if it failed in the middle of a transfer.
if (offset != size) {
log_write("failed to read in custom callback: %zd size: %zd\n", offset, size);
}
return 0;
}
offset += bytes_read;
return bytes_read;
}
R_TRY(thread::TransferPull(pbox, file_size,
[&](void* data, s64 off, s64 size, u64* bytes_read) -> Result {
return e.Read(path, data, off, size, bytes_read);
},
curl::OnUploadSeek{
[&offset](s64 new_offset){
offset = new_offset;
return true;
}
}
);
[&](thread::PullFunctionCallback pull) -> Result {
s64 offset{};
const auto result = curl::Api().FromMemory(
CURL_LOCATION_TO_API(loc),
curl::OnProgress{pbox->OnDownloadProgressCallback()},
curl::UploadInfo{
path, file_size,
[&](void *ptr, size_t size) -> size_t {
// curl will request past the size of the file, causing an error.
if (offset >= file_size) {
log_write("finished file upload\n");
return 0;
}
R_UNLESS(result.success, 0x1);
u64 bytes_read{};
if (R_FAILED(pull(ptr, size, &bytes_read))) {
log_write("failed to read in custom callback: %zd size: %zd\n", offset, size);
return 0;
}
offset += bytes_read;
return bytes_read;
}
}
);
R_UNLESS(result.success, 0x1);
R_SUCCEED();
}
));
}
R_SUCCEED();