@@ -9,8 +9,8 @@ | |||
"range-v3@0.11.0", | |||
"nlohmann-json@3.7.1", | |||
"neo-sqlite3@0.4.1", | |||
"neo-fun~0.5.4", | |||
"neo-compress~0.1.1", | |||
"neo-fun~0.6.0", | |||
"neo-compress~0.2.0", | |||
"neo-url~0.2.2", | |||
"semver@0.2.2", | |||
"pubgrub@0.2.1", | |||
@@ -19,7 +19,7 @@ | |||
"ctre@2.8.1", | |||
"fmt^7.0.3", | |||
"neo-http^0.1.0", | |||
"neo-io^0.1.0", | |||
"neo-io^0.1.1", | |||
"boost.leaf~0.3.0", | |||
], | |||
"test_driver": "Catch-Main" |
@@ -200,7 +200,6 @@ struct cli_catalog { | |||
catalog_path_flag cat_path{cmd}; | |||
args::Flag import_stdin{cmd, "stdin", "Import JSON from stdin", {"stdin"}}; | |||
args::Flag init{cmd, "initial", "Re-import the initial catalog contents", {"initial"}}; | |||
args::ValueFlagList<std::string> | |||
json_paths{cmd, | |||
"json", | |||
@@ -209,9 +208,6 @@ struct cli_catalog { | |||
int run() { | |||
auto cat = cat_path.open(); | |||
if (init.Get()) { | |||
cat.import_initial(); | |||
} | |||
for (const auto& json_fpath : json_paths.Get()) { | |||
cat.import_json_file(json_fpath); | |||
} | |||
@@ -443,7 +439,7 @@ struct cli_repoman { | |||
auto repo | |||
= dds::repo_manager::create(where.Get(), | |||
name ? std::make_optional(name.Get()) : std::nullopt); | |||
dds_log(info, "Created new repository '{}' in {}", repo.root(), repo.name()); | |||
dds_log(info, "Created new repository '{}' in {}", repo.name(), repo.root()); | |||
return 0; | |||
} | |||
} init{*this}; | |||
@@ -717,65 +713,30 @@ struct cli_repo { | |||
args::Flag update{cmd, "update", "Update catalog contents immediately", {"update", 'U'}}; | |||
int run() { | |||
return boost::leaf::try_handle_all( // | |||
[&]() -> dds::result<int> { | |||
try { | |||
auto cat = cat_path.open(); | |||
auto repo = dds::remote_repository::connect(url.Get()); | |||
repo.store(cat.database()); | |||
if (update) { | |||
repo.update_catalog(cat.database()); | |||
} | |||
} catch (...) { | |||
return dds::capture_exception(); | |||
} | |||
return 0; | |||
}, | |||
[&](neo::url_validation_error url_err, dds::e_url_string bad_url) { | |||
dds_log(error, "Invalid URL [{}]: {}", bad_url.value, url_err.what()); | |||
return 1; | |||
}, | |||
[&](const json5::parse_error& e, dds::e_http_url bad_url) { | |||
dds_log(error, | |||
"Error parsing JSON downloaded from URL [{}]: {}", | |||
bad_url.value, | |||
e.what()); | |||
return 1; | |||
}, | |||
[](dds::e_sqlite3_error_exc e, dds::e_url_string url) { | |||
dds_log(error, | |||
"Error accessing remote database (From {}): {}", | |||
url.value, | |||
e.message); | |||
return 1; | |||
}, | |||
[](dds::e_sqlite3_error_exc e) { | |||
dds_log(error, "Unexpected database error: {}", e.message); | |||
return 1; | |||
}, | |||
[&](dds::e_system_error_exc e, dds::e_http_connect conn) { | |||
dds_log(error, | |||
"Error opening connection to [{}:{}]: {}", | |||
conn.host, | |||
conn.port, | |||
e.message); | |||
return 1; | |||
}, | |||
[](const std::exception& e) { | |||
dds_log(error, "An unknown unhandled exception occurred: {}", e.what()); | |||
return 1; | |||
}, | |||
[](dds::e_system_error_exc e) { | |||
dds_log(error, "An unknown system_error occurred: {}", e.message); | |||
return 42; | |||
}, | |||
[](boost::leaf::diagnostic_info const& info) { | |||
dds_log(error, "An unnknown error occurred? {}", info); | |||
return 42; | |||
}); | |||
auto cat = cat_path.open(); | |||
auto repo = dds::remote_repository::connect(url.Get()); | |||
repo.store(cat.database()); | |||
if (update) { | |||
repo.update_catalog(cat.database()); | |||
} | |||
return 0; | |||
} | |||
} add{*this}; | |||
struct { | |||
cli_repo& parent; | |||
args::Command cmd{parent.repo_group, "update", "Update remote package information"}; | |||
common_flags _flags{cmd}; | |||
catalog_path_flag cat_path{cmd}; | |||
int run() { | |||
auto cat = cat_path.open(); | |||
dds::update_all_remotes(cat.database()); | |||
return 0; | |||
} | |||
} update{*this}; | |||
struct { | |||
cli_repo& parent; | |||
args::Command cmd{parent.repo_group, "init", "Initialize a directory as a repository"}; | |||
@@ -792,7 +753,7 @@ struct cli_repo { | |||
} | |||
} init{*this}; | |||
int run() { | |||
int _run() { | |||
if (ls.cmd) { | |||
return ls.run(); | |||
} else if (init.cmd) { | |||
@@ -801,11 +762,67 @@ struct cli_repo { | |||
return import_.run(); | |||
} else if (add.cmd) { | |||
return add.run(); | |||
} else if (update.cmd) { | |||
return update.run(); | |||
} else { | |||
assert(false); | |||
std::terminate(); | |||
} | |||
} | |||
int run() { | |||
return boost::leaf::try_handle_all( // | |||
[&]() -> dds::result<int> { | |||
try { | |||
return _run(); | |||
} catch (...) { | |||
return dds::capture_exception(); | |||
} | |||
return 0; | |||
}, | |||
[&](neo::url_validation_error url_err, dds::e_url_string bad_url) { | |||
dds_log(error, "Invalid URL [{}]: {}", bad_url.value, url_err.what()); | |||
return 1; | |||
}, | |||
[&](const json5::parse_error& e, dds::e_http_url bad_url) { | |||
dds_log(error, | |||
"Error parsing JSON downloaded from URL [{}]: {}", | |||
bad_url.value, | |||
e.what()); | |||
return 1; | |||
}, | |||
[](dds::e_sqlite3_error_exc e, dds::e_url_string url) { | |||
dds_log(error, | |||
"Error accessing remote database (From {}): {}", | |||
url.value, | |||
e.message); | |||
return 1; | |||
}, | |||
[](dds::e_sqlite3_error_exc e) { | |||
dds_log(error, "Unexpected database error: {}", e.message); | |||
return 1; | |||
}, | |||
[&](dds::e_system_error_exc e, dds::e_http_connect conn) { | |||
dds_log(error, | |||
"Error opening connection to [{}:{}]: {}", | |||
conn.host, | |||
conn.port, | |||
e.message); | |||
return 1; | |||
}, | |||
[](const std::exception& e) { | |||
dds_log(error, "An unknown unhandled exception occurred: {}", e.what()); | |||
return 1; | |||
}, | |||
[](dds::e_system_error_exc e) { | |||
dds_log(error, "An unknown system_error occurred: {}", e.message); | |||
return 42; | |||
}, | |||
[](boost::leaf::diagnostic_info const& info) { | |||
dds_log(error, "An unknown error occurred? {}", info); | |||
return 42; | |||
}); | |||
} | |||
}; | |||
/* |
@@ -2,7 +2,6 @@ | |||
#include "./import.hpp" | |||
#include <dds/catalog/init_catalog.hpp> | |||
#include <dds/dym.hpp> | |||
#include <dds/error/errors.hpp> | |||
#include <dds/solve/solve.hpp> | |||
@@ -92,9 +91,11 @@ void migrate_repodb_3(nsql::database& db) { | |||
version TEXT NOT NULL, | |||
description TEXT NOT NULL, | |||
remote_url TEXT NOT NULL, | |||
remote_id INTEGER REFERENCES dds_cat_remotes DEFAULT NULL, | |||
remote_id INTEGER | |||
REFERENCES dds_cat_remotes | |||
ON DELETE CASCADE, | |||
repo_transform TEXT NOT NULL DEFAULT '[]', | |||
UNIQUE (name, version) | |||
UNIQUE (name, version, remote_id) | |||
); | |||
INSERT INTO dds_cat_pkgs_new(pkg_id, | |||
@@ -118,7 +119,10 @@ void migrate_repodb_3(nsql::database& db) { | |||
CREATE TABLE dds_cat_pkg_deps_new ( | |||
dep_id INTEGER PRIMARY KEY AUTOINCREMENT, | |||
pkg_id INTEGER NOT NULL REFERENCES dds_cat_pkgs_new(pkg_id), | |||
pkg_id INTEGER | |||
NOT NULL | |||
REFERENCES dds_cat_pkgs_new(pkg_id) | |||
ON DELETE CASCADE, | |||
dep_name TEXT NOT NULL, | |||
low TEXT NOT NULL, | |||
high TEXT NOT NULL, | |||
@@ -239,13 +243,6 @@ void do_store_pkg(neo::sqlite3::database& db, | |||
} | |||
} | |||
void store_init_packages(nsql::database& db, nsql::statement_cache& st_cache) { | |||
dds_log(debug, "Restoring initial package data"); | |||
for (auto& pkg : init_catalog_packages()) { | |||
do_store_pkg(db, st_cache, pkg); | |||
} | |||
} | |||
void ensure_migrated(nsql::database& db) { | |||
db.exec(R"( | |||
PRAGMA foreign_keys = 1; | |||
@@ -275,10 +272,6 @@ void ensure_migrated(nsql::database& db) { | |||
int version = version_; | |||
// If this is the first time we're working here, import the initial | |||
// catalog with some useful tidbits. | |||
bool import_init_packages = version == 0; | |||
if (version > current_database_version) { | |||
dds_log(critical, | |||
"Catalog version is {}, but we only support up to {}", | |||
@@ -301,15 +294,6 @@ void ensure_migrated(nsql::database& db) { | |||
} | |||
meta["version"] = current_database_version; | |||
exec(db.prepare("UPDATE dds_cat_meta SET meta=?"), meta.dump()); | |||
if (import_init_packages) { | |||
dds_log( | |||
info, | |||
"A new catalog database case been created, and has been populated with some initial " | |||
"contents."); | |||
neo::sqlite3::statement_cache stmts{db}; | |||
store_init_packages(db, stmts); | |||
} | |||
} | |||
void check_json(bool b, std::string_view what) { | |||
@@ -361,17 +345,13 @@ std::optional<package_info> catalog::get(const package_id& pk_id) const noexcept | |||
description, | |||
repo_transform | |||
FROM dds_cat_pkgs | |||
WHERE name = ? AND version = ? | |||
WHERE name = ?1 AND version = ?2 | |||
ORDER BY pkg_id DESC | |||
)"_sql); | |||
st.reset(); | |||
st.bindings() = std::forward_as_tuple(pk_id.name, ver_str); | |||
auto opt_tup = nsql::unpack_single_opt<std::int64_t, | |||
std::string, | |||
std::string, | |||
std::string, | |||
std::string, | |||
std::string>(st); | |||
if (!opt_tup) { | |||
auto ec = st.step(std::nothrow); | |||
if (ec == nsql::errc::done) { | |||
dym_target::fill([&] { | |||
auto all_ids = this->all(); | |||
auto id_strings | |||
@@ -380,9 +360,36 @@ std::optional<package_info> catalog::get(const package_id& pk_id) const noexcept | |||
}); | |||
return std::nullopt; | |||
} | |||
const auto& [pkg_id, name, version, remote_url, description, repo_transform] = *opt_tup; | |||
assert(pk_id.name == name); | |||
assert(pk_id.version == semver::version::parse(version)); | |||
neo_assert_always(invariant, | |||
ec == nsql::errc::row, | |||
"Failed to pull a package from the catalog database", | |||
ec, | |||
pk_id.to_string(), | |||
nsql::error_category().message(int(ec))); | |||
const auto& [pkg_id, name, version, remote_url, description, repo_transform] | |||
= st.row() | |||
.unpack<std::int64_t, | |||
std::string, | |||
std::string, | |||
std::string, | |||
std::string, | |||
std::string>(); | |||
ec = st.step(std::nothrow); | |||
if (ec == nsql::errc::row) { | |||
dds_log(warn, | |||
"There is more than one entry for package {} in the catalog database. One will be " | |||
"chosen arbitrarily.", | |||
pk_id.to_string()); | |||
} | |||
neo_assert(invariant, | |||
pk_id.name == name && pk_id.version == semver::version::parse(version), | |||
"Package metadata does not match", | |||
pk_id.to_string(), | |||
name, | |||
version); | |||
auto deps = dependencies_of(pk_id); | |||
@@ -439,6 +446,7 @@ std::vector<package_id> catalog::by_name(std::string_view sv) const noexcept { | |||
SELECT name, version | |||
FROM dds_cat_pkgs | |||
WHERE name = ? | |||
ORDER BY pkg_id DESC | |||
)"_sql), | |||
sv) // | |||
| neo::lref // | |||
@@ -485,9 +493,3 @@ void catalog::import_json_str(std::string_view content) { | |||
do_store_pkg(_db, _stmt_cache, pkg); | |||
} | |||
} | |||
void catalog::import_initial() { | |||
nsql::transaction_guard tr{_db}; | |||
dds_log(info, "Restoring built-in initial catalog contents"); | |||
store_init_packages(_db, _stmt_cache); | |||
} |
@@ -39,7 +39,6 @@ public: | |||
std::vector<package_id> by_name(std::string_view sv) const noexcept; | |||
std::vector<dependency> dependencies_of(const package_id& pkg) const noexcept; | |||
void import_initial(); | |||
void import_json_str(std::string_view json_str); | |||
void import_json_file(path_ref json_path) { | |||
auto content = dds::slurp_file(json_path); |
@@ -1,11 +0,0 @@ | |||
#pragma once | |||
#include "./package_info.hpp" | |||
#include <vector> | |||
namespace dds { | |||
const std::vector<package_info>& init_catalog_packages() noexcept; | |||
} // namespace dds |
@@ -1,6 +1,7 @@ | |||
#include "./package_info.hpp" | |||
#include <dds/error/errors.hpp> | |||
#include <dds/util/string.hpp> | |||
#include <neo/url.hpp> | |||
#include <neo/utility.hpp> | |||
@@ -14,6 +15,12 @@ dds::remote_listing_var dds::parse_remote_url(std::string_view sv) { | |||
return git_remote_listing::from_url(sv); | |||
} else if (url.scheme == neo::oper::any_of("http", "https")) { | |||
return http_remote_listing::from_url(sv); | |||
} else if (url.scheme == neo::oper::any_of("dds+http", "dds+https", "http+dds", "https+dds")) { | |||
fs::path path = url.path; | |||
auto leaf = path.filename().string(); | |||
auto namever_path = replace(leaf, "@", "/"); | |||
url.path = (path.parent_path() / "pkg" / namever_path / "sdist.tar.gz").generic_string(); | |||
return http_remote_listing::from_url(url.to_string()); | |||
} else { | |||
throw_user_error< | |||
errc::invalid_remote_url>("Unknown scheme '{}' for remote package URL '{}'", |
@@ -151,6 +151,18 @@ http_remote_listing http_remote_listing::from_url(std::string_view sv) { | |||
unsigned int strip_components = 1; | |||
std::optional<lm::usage> auto_lib; | |||
// If we are a dds+ URL, strip_components should be zero, and give the url a plain | |||
// HTTP/HTTPS scheme | |||
if (url.scheme.starts_with("dds+")) { | |||
url.scheme = url.scheme.substr(4); | |||
strip_components = 0; | |||
} else if (url.scheme.ends_with("+dds")) { | |||
url.scheme.erase(url.scheme.size() - 4);  // strip the 4-char "+dds" suffix (pos overload erases to end) | |||
strip_components = 0; | |||
} else { | |||
// Leave the URL as-is | |||
} | |||
if (url.query) { | |||
neo::basic_query_string_view qsv{*url.query}; | |||
for (auto qstr : qsv) { |
@@ -3,13 +3,18 @@ | |||
#include <dds/error/errors.hpp> | |||
#include <dds/http/session.hpp> | |||
#include <dds/temp.hpp> | |||
#include <dds/util/log.hpp> | |||
#include <dds/util/result.hpp> | |||
#include <neo/event.hpp> | |||
#include <neo/scope.hpp> | |||
#include <neo/sqlite3/exec.hpp> | |||
#include <neo/sqlite3/iter_tuples.hpp> | |||
#include <neo/sqlite3/single.hpp> | |||
#include <neo/sqlite3/transaction.hpp> | |||
#include <neo/url.hpp> | |||
#include <neo/utility.hpp> | |||
#include <range/v3/range/conversion.hpp> | |||
using namespace dds; | |||
namespace nsql = neo::sqlite3; | |||
@@ -25,9 +30,7 @@ struct remote_db { | |||
url.host.has_value(), | |||
"URL does not have a hostname??", | |||
url.to_string()); | |||
auto sess = url.scheme == "https" | |||
? http_session::connect_ssl(*url.host, url.port_or_default_port_or(443)) | |||
: http_session::connect(*url.host, url.port_or_default_port_or(80)); | |||
auto sess = http_session::connect_for(url); | |||
auto tempdir = temporary_dir::create(); | |||
auto repo_db_dl = tempdir.path() / "repo.db"; | |||
@@ -64,31 +67,39 @@ remote_repository remote_repository::connect(std::string_view url_str) { | |||
auto name_st = db.db.prepare("SELECT name FROM dds_repo_meta"); | |||
auto [name] = nsql::unpack_single<std::string>(name_st); | |||
remote_repository ret; | |||
ret._base_url = url; | |||
ret._name = name; | |||
return ret; | |||
return {name, url}; | |||
} | |||
void remote_repository::store(nsql::database_ref db) { | |||
auto st = db.prepare(R"( | |||
INSERT INTO dds_cat_remotes (name, gen_ident, remote_url) | |||
VALUES (?, ?, ?) | |||
ON CONFLICT (name) DO | |||
UPDATE SET gen_ident = ?2, remote_url = ?3 | |||
)"); | |||
nsql::exec(st, _name, "[placeholder]", _base_url.to_string()); | |||
} | |||
void remote_repository::update_catalog(nsql::database_ref db) { | |||
dds_log(info, "Pulling repository contents for {} [{}]", _name, _base_url.to_string()); | |||
auto rdb = remote_db::download_and_open_for_base(_base_url); | |||
auto base_url_str = _base_url.to_string(); | |||
while (base_url_str.ends_with("/")) { | |||
base_url_str.pop_back(); | |||
} | |||
auto db_path = rdb._tempdir.path() / "repo.db"; | |||
auto rid_st = db.prepare("SELECT remote_id FROM dds_cat_remotes WHERE name = ?"); | |||
rid_st.bindings()[1] = _name; | |||
auto [remote_id] = nsql::unpack_single<std::int64_t>(rid_st); | |||
rid_st.reset(); | |||
nsql::transaction_guard tr{db}; | |||
nsql::exec(db.prepare("ATTACH DATABASE ? AS remote"), db_path.string()); | |||
neo_defer { db.exec("DETACH DATABASE remote"); }; | |||
nsql::transaction_guard tr{db}; | |||
nsql::exec( // | |||
db.prepare(R"( | |||
DELETE FROM dds_cat_pkgs | |||
@@ -103,9 +114,70 @@ void remote_repository::update_catalog(nsql::database_ref db) { | |||
name, | |||
version, | |||
description, | |||
printf('dds:%s/%s', name, version), | |||
CASE | |||
WHEN url LIKE 'dds:%@%' THEN | |||
-- Convert 'dds:name@ver' to 'dds+<base-repo-url>/name@ver' | |||
-- This will later resolve to the actual package URL | |||
printf('dds+%s/%s', ?2, substr(url, 5)) | |||
ELSE | |||
-- Non-'dds:' URLs are kept as-is | |||
url | |||
END, | |||
?1 | |||
FROM remote.dds_repo_packages | |||
)"), | |||
remote_id); | |||
remote_id, | |||
base_url_str); | |||
db.exec(R"( | |||
INSERT OR REPLACE INTO dds_cat_pkg_deps (pkg_id, dep_name, low, high) | |||
SELECT | |||
local_pkgs.pkg_id AS pkg_id, | |||
dep_name, | |||
low, | |||
high | |||
FROM remote.dds_repo_package_deps AS deps, | |||
remote.dds_repo_packages AS pkgs USING(package_id), | |||
dds_cat_pkgs AS local_pkgs USING(name, version) | |||
)"); | |||
// Validate our database | |||
auto fk_check = db.prepare("PRAGMA foreign_key_check"); | |||
auto rows = nsql::iter_tuples<std::string, std::int64_t, std::string, std::string>(fk_check); | |||
bool any_failed = false; | |||
for (auto [child_table, rowid, parent_table, failed_idx] : rows) { | |||
dds_log( | |||
critical, | |||
"Database foreign_key error after import: {0}.{3} referencing {2} violated at row {1}", | |||
child_table, | |||
rowid, | |||
parent_table, | |||
failed_idx); | |||
any_failed = true; | |||
} | |||
auto int_check = db.prepare("PRAGMA main.integrity_check"); | |||
for (auto [error] : nsql::iter_tuples<std::string>(int_check)) { | |||
if (error == "ok") { | |||
continue; | |||
} | |||
dds_log(critical, "Database errors after import: {}", error); | |||
any_failed = true; | |||
} | |||
if (any_failed) { | |||
throw_external_error<errc::corrupted_catalog_db>( | |||
"Database update failed due to data integrity errors"); | |||
} | |||
} | |||
void dds::update_all_remotes(nsql::database_ref db) { | |||
dds_log(info, "Updating catalog from all remotes"); | |||
auto repos_st = db.prepare("SELECT name, remote_url FROM dds_cat_remotes"); | |||
auto tups = nsql::iter_tuples<std::string, std::string>(repos_st) | ranges::to_vector; | |||
for (const auto& [name, remote_url] : tups) { | |||
DDS_E_SCOPE(e_url_string{remote_url}); | |||
remote_repository repo{name, neo::url::parse(remote_url)}; | |||
repo.update_catalog(db); | |||
} | |||
dds_log(info, "Recompacting database..."); | |||
db.exec("VACUUM"); | |||
} |
@@ -16,15 +16,18 @@ class remote_repository { | |||
std::string _name; | |||
neo::url _base_url; | |||
remote_repository(std::string name, neo::url url) | |||
: _name(std::move(name)) | |||
, _base_url(std::move(url)) {} | |||
remote_repository() = default; | |||
public: | |||
static remote_repository connect(std::string_view url); | |||
// const repository_manifest& manifest() const noexcept; | |||
void store(neo::sqlite3::database_ref); | |||
void update_catalog(neo::sqlite3::database_ref); | |||
}; | |||
void update_all_remotes(neo::sqlite3::database_ref); | |||
} // namespace dds |
@@ -1,5 +1,6 @@ | |||
#include "./repoman.hpp" | |||
#include <dds/catalog/import.hpp> | |||
#include <dds/package/manifest.hpp> | |||
#include <dds/util/log.hpp> | |||
#include <dds/util/result.hpp> | |||
@@ -30,6 +31,7 @@ void migrate_db_1(nsql::database_ref db) { | |||
name TEXT NOT NULL, | |||
version TEXT NOT NULL, | |||
description TEXT NOT NULL, | |||
url TEXT NOT NULL, | |||
UNIQUE (name, version) | |||
); | |||
@@ -48,6 +50,7 @@ void migrate_db_1(nsql::database_ref db) { | |||
void ensure_migrated(nsql::database_ref db, std::optional<std::string_view> name) { | |||
db.exec(R"( | |||
PRAGMA busy_timeout = 6000; | |||
PRAGMA foreign_keys = 1; | |||
CREATE TABLE IF NOT EXISTS dds_repo_meta ( | |||
meta_version INTEGER DEFAULT 1, | |||
@@ -87,7 +90,7 @@ repo_manager repo_manager::create(path_ref directory, std::optional<std::string_ | |||
DDS_E_SCOPE(e_init_repo_db{db_path}); | |||
DDS_E_SCOPE(e_open_repo_db{db_path}); | |||
ensure_migrated(db, name); | |||
fs::create_directories(directory / "data"); | |||
fs::create_directories(directory / "pkg"); | |||
} | |||
return open(directory); | |||
} | |||
@@ -148,8 +151,13 @@ void repo_manager::import_targz(path_ref tgz_file) { | |||
dds_log(debug, "Recording package {}@{}", man->pkg_id.name, man->pkg_id.version.to_string()); | |||
nsql::exec( // | |||
_stmts(R"( | |||
INSERT INTO dds_repo_packages (name, version, description) | |||
VALUES (?, ?, 'No description') | |||
INSERT INTO dds_repo_packages (name, version, description, url) | |||
VALUES ( | |||
?1, | |||
?2, | |||
'No description', | |||
printf('dds:%s@%s', ?1, ?2) | |||
) | |||
)"_sql), | |||
man->pkg_id.name, | |||
man->pkg_id.version.to_string()); | |||
@@ -171,9 +179,9 @@ void repo_manager::import_targz(path_ref tgz_file) { | |||
iv_1.high.to_string()); | |||
} | |||
auto dest_dir = data_dir() / man->pkg_id.name; | |||
auto dest_path = dest_dir / fmt::format("{}.tar.gz", man->pkg_id.version.to_string()); | |||
fs::create_directories(dest_dir); | |||
auto dest_path | |||
= pkg_dir() / man->pkg_id.name / man->pkg_id.version.to_string() / "sdist.tar.gz"; | |||
fs::create_directories(dest_path.parent_path()); | |||
fs::copy(tgz_file, dest_path); | |||
tr.commit(); | |||
@@ -194,17 +202,17 @@ void repo_manager::delete_package(package_id pkg_id) { | |||
pkg_id.version.to_string()); | |||
/// XXX: Verify with _db.changes() that we actually deleted one row | |||
auto name_dir = data_dir() / pkg_id.name; | |||
auto ver_file = name_dir / fmt::format("{}.tar.gz", pkg_id.version.to_string()); | |||
auto name_dir = pkg_dir() / pkg_id.name; | |||
auto ver_dir = name_dir / pkg_id.version.to_string(); | |||
DDS_E_SCOPE(e_repo_delete_targz{ver_file}); | |||
DDS_E_SCOPE(e_repo_delete_targz{ver_dir}); | |||
if (!fs::is_regular_file(ver_file)) { | |||
if (!fs::is_directory(ver_dir)) { | |||
throw std::system_error(std::make_error_code(std::errc::no_such_file_or_directory), | |||
"No source archive for the requested package"); | |||
} | |||
fs::remove(ver_file); | |||
fs::remove_all(ver_dir); | |||
tr.commit(); | |||
@@ -49,7 +49,7 @@ public: | |||
static repo_manager create(path_ref directory, std::optional<std::string_view> name); | |||
static repo_manager open(path_ref directory); | |||
auto data_dir() const noexcept { return _root / "data"; } | |||
auto pkg_dir() const noexcept { return _root / "pkg"; } | |||
path_ref root() const noexcept { return _root; } | |||
std::string name() const noexcept; | |||
@@ -14,17 +14,17 @@ const auto DATA_DIR = REPO_ROOT / "data"; | |||
} // namespace | |||
TEST_CASE("Open a repository") { | |||
TEST_CASE("Open and import into a repository") { | |||
auto tdir = dds::temporary_dir::create(); | |||
auto repo = dds::repo_manager::create(tdir.path(), "test-repo"); | |||
auto neo_url_tgz = DATA_DIR / "neo-url@0.2.1.tar.gz"; | |||
repo.import_targz(neo_url_tgz); | |||
CHECK(dds::fs::is_directory(repo.data_dir() / "neo-url/")); | |||
CHECK(dds::fs::is_regular_file(repo.data_dir() / "neo-url/0.2.1.tar.gz")); | |||
CHECK(dds::fs::is_directory(repo.pkg_dir() / "neo-url/")); | |||
CHECK(dds::fs::is_regular_file(repo.pkg_dir() / "neo-url/0.2.1/sdist.tar.gz")); | |||
CHECK_THROWS_AS(repo.import_targz(neo_url_tgz), neo::sqlite3::constraint_unique_error); | |||
repo.delete_package(dds::package_id::parse("neo-url@0.2.1")); | |||
CHECK_FALSE(dds::fs::is_regular_file(repo.data_dir() / "neo-url/0.2.1.tar.gz")); | |||
CHECK_FALSE(dds::fs::is_directory(repo.data_dir() / "neo-url")); | |||
CHECK_FALSE(dds::fs::is_regular_file(repo.pkg_dir() / "neo-url/0.2.1/sdist.tar.gz")); | |||
CHECK_FALSE(dds::fs::is_directory(repo.pkg_dir() / "neo-url")); | |||
CHECK_THROWS_AS(repo.delete_package(dds::package_id::parse("neo-url@0.2.1")), | |||
std::system_error); | |||
CHECK_NOTHROW(repo.import_targz(neo_url_tgz)); |
@@ -1,15 +1,9 @@ | |||
{ | |||
"version": 2, | |||
"packages": { | |||
"neo-sqlite3": { | |||
"0.1.0": { | |||
"url": "git+https://github.com/vector-of-bool/neo-sqlite3.git#0.1.0" | |||
}, | |||
"0.2.2": { | |||
"url": "git+https://github.com/vector-of-bool/neo-sqlite3.git#0.2.2" | |||
}, | |||
"neo-fun": { | |||
"0.3.0": { | |||
"url": "git+https://github.com/vector-of-bool/neo-sqlite3.git#0.3.0" | |||
"url": "git+https://github.com/vector-of-bool/neo-fun.git#0.3.0" | |||
} | |||
} | |||
} |