Compare commits
15 Commits
v2025.0719
...
main
Author | SHA1 | Date | |
---|---|---|---|
501fa65d76 | |||
507897d9a1 | |||
9c98ffcb86 | |||
938f4ac323 | |||
c507b1405e | |||
2ab0483ecb | |||
a39e46c6c6 | |||
7c785e1a32 | |||
3e4f327426 | |||
187f1a250d | |||
52d8e5b95e | |||
bfeaf4d0db | |||
6a3ca6bc10 | |||
7f8312ed59 | |||
1b03087c02 |
325
.kiro/specs/multi-server-support/design.md
Normal file
325
.kiro/specs/multi-server-support/design.md
Normal file
@@ -0,0 +1,325 @@
|
||||
# Design Document
|
||||
|
||||
## Overview
|
||||
|
||||
This design extends getpkg to support multiple package servers while maintaining full backward compatibility. The solution introduces a server configuration system, updates the client architecture to handle multiple servers, and reorganizes package metadata storage. The design prioritizes minimal disruption to existing functionality while providing powerful multi-server capabilities.
|
||||
|
||||
## Architecture
|
||||
|
||||
### High-Level Architecture
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
CLI[CLI Commands] --> SM[ServerManager]
|
||||
CLI --> PM[PackageManager]
|
||||
PM --> SM
|
||||
PM --> GC[GetbinClient]
|
||||
SM --> CF[servers.json]
|
||||
PM --> PF[packages/*.json]
|
||||
GC --> S1[Server 1]
|
||||
GC --> S2[Server 2]
|
||||
GC --> SN[Server N]
|
||||
```
|
||||
|
||||
### Server Management Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant CLI
|
||||
participant ServerManager
|
||||
participant Config
|
||||
|
||||
User->>CLI: getpkg server add example.com
|
||||
CLI->>ServerManager: addServer("example.com")
|
||||
ServerManager->>Config: load servers.json
|
||||
ServerManager->>ServerManager: validate URL
|
||||
ServerManager->>Config: save updated servers.json
|
||||
ServerManager->>CLI: success confirmation
|
||||
CLI->>User: Server added successfully
|
||||
```
|
||||
|
||||
### Package Installation Flow
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant User
|
||||
participant CLI
|
||||
participant PackageManager
|
||||
participant GetbinClient
|
||||
participant Server1
|
||||
participant Server2
|
||||
|
||||
User->>CLI: getpkg install tool
|
||||
CLI->>PackageManager: install("tool")
|
||||
PackageManager->>GetbinClient: download("tool", servers[0])
|
||||
GetbinClient->>Server1: GET /object/tool:arch
|
||||
alt Package found
|
||||
Server1-->>GetbinClient: 200 + package data
|
||||
GetbinClient-->>PackageManager: success
|
||||
else Package not found
|
||||
Server1-->>GetbinClient: 404
|
||||
GetbinClient->>Server2: GET /object/tool:arch
|
||||
Server2-->>GetbinClient: 200 + package data
|
||||
GetbinClient-->>PackageManager: success
|
||||
end
|
||||
PackageManager->>PackageManager: install package
|
||||
PackageManager->>CLI: installation complete
|
||||
```
|
||||
|
||||
## Components and Interfaces
|
||||
|
||||
### ServerManager Class
|
||||
|
||||
**Purpose**: Manages server configuration, write tokens, and provides server list to other components.
|
||||
|
||||
**Interface**:
|
||||
```cpp
|
||||
class ServerManager {
|
||||
public:
|
||||
ServerManager();
|
||||
|
||||
// Server management
|
||||
bool addServer(const std::string& serverUrl, const std::string& writeToken = "");
|
||||
bool removeServer(const std::string& serverUrl);
|
||||
std::vector<std::string> getServers() const;
|
||||
std::string getDefaultServer() const;
|
||||
std::string getDefaultPublishServer() const; // First server with write token
|
||||
|
||||
// Token management
|
||||
bool setWriteToken(const std::string& serverUrl, const std::string& token);
|
||||
std::string getWriteToken(const std::string& serverUrl) const;
|
||||
bool hasWriteToken(const std::string& serverUrl) const;
|
||||
std::vector<std::string> getServersWithTokens() const;
|
||||
|
||||
// Configuration
|
||||
bool loadConfiguration();
|
||||
bool saveConfiguration();
|
||||
void ensureDefaultConfiguration();
|
||||
|
||||
// Migration
|
||||
bool migrateFromLegacy();
|
||||
|
||||
private:
|
||||
std::vector<ServerConfig> servers_;
|
||||
std::filesystem::path configPath_;
|
||||
|
||||
bool validateServerUrl(const std::string& url) const;
|
||||
bool isServerReachable(const std::string& url) const;
|
||||
ServerConfig* findServer(const std::string& url);
|
||||
};
|
||||
```
|
||||
|
||||
### Enhanced GetbinClient Class
|
||||
|
||||
**Purpose**: Extended to support multiple servers with fallback logic.
|
||||
|
||||
**Interface Changes**:
|
||||
```cpp
|
||||
class GetbinClient {
|
||||
public:
|
||||
GetbinClient(const std::vector<std::string>& servers);
|
||||
|
||||
// Existing methods with server selection
|
||||
bool download(const std::string& toolName, const std::string& arch,
|
||||
const std::string& outPath, ProgressCallback progressCallback = nullptr);
|
||||
bool downloadFromServer(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback = nullptr);
|
||||
|
||||
// Server-specific operations
|
||||
bool upload(const std::string& serverUrl, const std::string& archivePath,
|
||||
std::string& outUrl, std::string& outHash, const std::string& token,
|
||||
ProgressCallback progressCallback = nullptr);
|
||||
bool getHash(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, std::string& outHash);
|
||||
|
||||
// Multi-server operations
|
||||
bool findPackageServer(const std::string& toolName, const std::string& arch,
|
||||
std::string& foundServer) const;
|
||||
|
||||
private:
|
||||
std::vector<std::string> servers_;
|
||||
std::string buildUrl(const std::string& serverUrl, const std::string& endpoint) const;
|
||||
};
|
||||
```
|
||||
|
||||
### PackageMetadata Structure
|
||||
|
||||
**Purpose**: Enhanced metadata structure to track server source.
|
||||
|
||||
**Structure**:
|
||||
```cpp
|
||||
struct PackageMetadata {
|
||||
std::string name;
|
||||
std::string version;
|
||||
std::string hash;
|
||||
std::string arch;
|
||||
std::string sourceServer; // New field
|
||||
std::string installDate; // New field for better tracking
|
||||
|
||||
// Serialization
|
||||
nlohmann::json toJson() const;
|
||||
static PackageMetadata fromJson(const nlohmann::json& j);
|
||||
|
||||
// Migration support
|
||||
static PackageMetadata fromLegacyJson(const nlohmann::json& j, const std::string& defaultServer);
|
||||
};
|
||||
```
|
||||
|
||||
### Migration Manager
|
||||
|
||||
**Purpose**: Handles migration from single-server to multi-server configuration.
|
||||
|
||||
**Interface**:
|
||||
```cpp
|
||||
class MigrationManager {
|
||||
public:
|
||||
MigrationManager();
|
||||
|
||||
bool needsMigration() const;
|
||||
bool performMigration();
|
||||
|
||||
private:
|
||||
bool migrateServerConfiguration();
|
||||
bool migratePackageMetadata();
|
||||
bool movePackageFiles();
|
||||
bool updatePackageMetadata();
|
||||
|
||||
std::filesystem::path oldConfigDir_;
|
||||
std::filesystem::path newConfigDir_;
|
||||
std::filesystem::path packagesDir_;
|
||||
};
|
||||
```
|
||||
|
||||
## Data Models
|
||||
|
||||
### Server Configuration Format
|
||||
|
||||
**File**: `~/.config/getpkg/servers.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"servers": [
|
||||
{
|
||||
"url": "getpkg.xyz",
|
||||
"name": "Official getpkg Registry",
|
||||
"default": true,
|
||||
"writeToken": "",
|
||||
"added": "2024-01-15T10:30:00Z"
|
||||
},
|
||||
{
|
||||
"url": "packages.example.com",
|
||||
"name": "Example Corporate Registry",
|
||||
"default": false,
|
||||
"writeToken": "abc123token456",
|
||||
"added": "2024-01-16T14:20:00Z"
|
||||
}
|
||||
],
|
||||
"lastUpdated": "2024-01-16T14:20:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Enhanced Package Metadata Format
|
||||
|
||||
**File**: `~/.config/getpkg/packages/<tool_name>.json`
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "example-tool",
|
||||
"version": "2024.0115.1430",
|
||||
"hash": "1234567890123456",
|
||||
"arch": "x86_64",
|
||||
"sourceServer": "getpkg.xyz",
|
||||
"installDate": "2024-01-15T14:30:00Z",
|
||||
"lastUpdated": "2024-01-15T14:30:00Z"
|
||||
}
|
||||
```
|
||||
|
||||
### Directory Structure Changes
|
||||
|
||||
```
|
||||
~/.config/getpkg/
|
||||
├── servers.json # New: Server configuration with embedded tokens
|
||||
├── packages/ # New: Package metadata directory
|
||||
│ ├── tool1.json
|
||||
│ ├── tool2.json
|
||||
│ └── ...
|
||||
└── getpkg.xyz/ # Legacy: Will be migrated to servers.json
|
||||
└── write_token.txt # Legacy: Will be migrated
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Server Connectivity Issues
|
||||
|
||||
1. **Network Failures**: Graceful fallback to next server in list
|
||||
2. **Invalid Responses**: Clear error messages with server identification
|
||||
3. **Authentication Failures**: Server-specific error handling with token guidance
|
||||
|
||||
### Configuration Corruption
|
||||
|
||||
1. **Invalid JSON**: Automatic backup and reset to default configuration
|
||||
2. **Missing Files**: Automatic creation with default settings
|
||||
3. **Permission Issues**: Clear error messages with resolution steps
|
||||
|
||||
### Migration Failures
|
||||
|
||||
1. **Partial Migration**: Rollback capability with clear status reporting
|
||||
2. **File Conflicts**: Safe handling with backup creation
|
||||
3. **Metadata Corruption**: Individual file recovery without breaking entire system
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
1. **ServerManager**: Configuration loading, validation, server management
|
||||
2. **GetbinClient**: Multi-server communication, fallback logic
|
||||
3. **PackageMetadata**: Serialization, migration, validation
|
||||
4. **MigrationManager**: Legacy data handling, file operations
|
||||
|
||||
### Integration Tests
|
||||
|
||||
1. **End-to-End Installation**: Multi-server package discovery and installation
|
||||
2. **Server Management**: Add/remove servers with real configuration
|
||||
3. **Migration Testing**: Legacy to new format conversion
|
||||
4. **Publish/Unpublish**: Server-specific operations
|
||||
|
||||
### Compatibility Tests
|
||||
|
||||
1. **Backward Compatibility**: Existing installations continue working
|
||||
2. **Legacy Format**: Old package files are properly migrated
|
||||
3. **Default Behavior**: No configuration changes for existing users
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Core Infrastructure
|
||||
- Implement ServerManager class
|
||||
- Create server configuration format
|
||||
- Add basic server validation
|
||||
|
||||
### Phase 2: Client Enhancement
|
||||
- Extend GetbinClient for multi-server support
|
||||
- Implement fallback logic
|
||||
- Add server-specific operations
|
||||
|
||||
### Phase 3: Package Management
|
||||
- Update package metadata format
|
||||
- Implement packages directory structure
|
||||
- Add server tracking to installations
|
||||
|
||||
### Phase 4: Migration System
|
||||
- Create MigrationManager
|
||||
- Implement automatic migration
|
||||
- Add backward compatibility layer
|
||||
|
||||
### Phase 5: CLI Integration
|
||||
- Add server management commands
|
||||
- Update existing commands for multi-server
|
||||
- Implement server selection options
|
||||
|
||||
### Phase 6: Testing and Polish
|
||||
- Comprehensive testing suite
|
||||
- Error handling refinement
|
||||
- Documentation updates
|
79
.kiro/specs/multi-server-support/requirements.md
Normal file
79
.kiro/specs/multi-server-support/requirements.md
Normal file
@@ -0,0 +1,79 @@
|
||||
# Requirements Document
|
||||
|
||||
## Introduction
|
||||
|
||||
This feature extends getpkg to support multiple package servers instead of being limited to only getpkg.xyz. Users will be able to add and remove package servers, with getpkg searching across all configured servers to find packages. The system will maintain backward compatibility while providing flexible server management capabilities.
|
||||
|
||||
## Requirements
|
||||
|
||||
### Requirement 1
|
||||
|
||||
**User Story:** As a developer, I want to configure multiple package servers, so that I can access packages from different repositories and have redundancy in case one server is unavailable.
|
||||
|
||||
#### Acceptance Criteria
|
||||
|
||||
1. WHEN I run `getpkg server add <server_url>` THEN the system SHALL add the server to the configuration and confirm the addition
|
||||
2. WHEN I run `getpkg server remove <server_url>` THEN the system SHALL remove the server from the configuration and confirm the removal
|
||||
3. WHEN I run `getpkg server list` THEN the system SHALL display all configured servers in the order they were added
|
||||
4. WHEN no servers are configured THEN the system SHALL default to using getpkg.xyz as the primary server
|
||||
5. WHEN I add the first custom server THEN getpkg.xyz SHALL remain as the default first server unless explicitly removed
|
||||
|
||||
### Requirement 2
|
||||
|
||||
**User Story:** As a user, I want getpkg to search across all configured servers when installing packages, so that I can access packages from any of my configured repositories.
|
||||
|
||||
#### Acceptance Criteria
|
||||
|
||||
1. WHEN I run `getpkg install <tool_name>` THEN the system SHALL search servers in the order they were configured
|
||||
2. WHEN a package is found on the first server THEN the system SHALL install from that server and not check remaining servers
|
||||
3. WHEN a package is not found on the first server THEN the system SHALL try the next server in order
|
||||
4. WHEN a package is not found on any server THEN the system SHALL report that the package was not found
|
||||
5. WHEN checking for updates THEN the system SHALL use the same server where the package was originally installed
|
||||
|
||||
### Requirement 3
|
||||
|
||||
**User Story:** As a package publisher, I want to specify which server to publish to and manage write tokens per server, so that I can control where my packages are distributed and authenticate appropriately.
|
||||
|
||||
#### Acceptance Criteria
|
||||
|
||||
1. WHEN I run `getpkg publish <tool_name> <folder>` without specifying a server THEN the system SHALL publish to the first configured server that has a write token
|
||||
2. WHEN I run `getpkg publish --server <server_url> <tool_name> <folder>` THEN the system SHALL publish to the specified server using its stored write token
|
||||
3. WHEN I run `getpkg unpublish <tool_name>` without specifying a server THEN the system SHALL unpublish from the first configured server that has a write token
|
||||
4. WHEN I run `getpkg unpublish --server <server_url> <tool_name>` THEN the system SHALL unpublish from the specified server using its stored write token
|
||||
5. WHEN no servers have write tokens THEN the system SHALL report an error and suggest adding a write token to a server
|
||||
|
||||
### Requirement 4
|
||||
|
||||
**User Story:** As a user, I want my package metadata to be organized by server, so that I can track which packages came from which servers and manage them appropriately.
|
||||
|
||||
#### Acceptance Criteria
|
||||
|
||||
1. WHEN a package is installed THEN the system SHALL store the package metadata in `~/.config/getpkg/packages/<tool_name>.json`
|
||||
2. WHEN package metadata is stored THEN it SHALL include the source server URL in addition to existing fields
|
||||
3. WHEN the packages directory doesn't exist THEN the system SHALL create it automatically
|
||||
4. WHEN migrating from the old format THEN existing package JSON files SHALL be moved to the packages subdirectory
|
||||
5. WHEN migrating from the old format THEN existing package metadata SHALL be updated to include getpkg.xyz as the source server
|
||||
|
||||
### Requirement 5
|
||||
|
||||
**User Story:** As a user, I want server configuration to be persistent and secure, so that my settings are maintained across sessions and my authentication tokens are protected.
|
||||
|
||||
#### Acceptance Criteria
|
||||
|
||||
1. WHEN server configuration is modified THEN it SHALL be stored in `~/.config/getpkg/servers.json`
|
||||
2. WHEN the configuration file doesn't exist THEN the system SHALL create it with getpkg.xyz as the default server
|
||||
3. WHEN reading server configuration THEN the system SHALL validate the JSON format and handle corruption gracefully
|
||||
4. WHEN a server URL is invalid THEN the system SHALL reject the addition and provide a helpful error message
|
||||
5. WHEN authentication tokens are needed THEN they SHALL continue to be stored per-server, with legacy token locations remaining readable for backward compatibility
|
||||
|
||||
### Requirement 6
|
||||
|
||||
**User Story:** As a user, I want the multi-server functionality to be backward compatible, so that existing installations continue to work without modification.
|
||||
|
||||
#### Acceptance Criteria
|
||||
|
||||
1. WHEN getpkg starts with no server configuration THEN it SHALL automatically configure getpkg.xyz as the default server
|
||||
2. WHEN existing package JSON files are found in `~/.config/getpkg/` THEN they SHALL be automatically migrated to the packages subdirectory
|
||||
3. WHEN migrated package files are processed THEN they SHALL be updated to include server source information
|
||||
4. WHEN all existing functionality is used THEN it SHALL work exactly as before for users who don't configure additional servers
|
||||
5. WHEN the migration process fails THEN the system SHALL provide clear error messages and not break existing functionality
|
138
.kiro/specs/multi-server-support/tasks.md
Normal file
138
.kiro/specs/multi-server-support/tasks.md
Normal file
@@ -0,0 +1,138 @@
|
||||
# Implementation Plan
|
||||
|
||||
Based on analysis of the current codebase, the multi-server support feature needs to be built from scratch. The current implementation has a hardcoded `SERVER_HOST = "getpkg.xyz"` in `GetbinClient` and no server management infrastructure.
|
||||
|
||||
## Core Infrastructure Tasks
|
||||
|
||||
- [x] 1. Create ServerManager class and server configuration system
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Implement ServerManager class with server add/remove/list functionality
|
||||
- Create server configuration JSON format and file handling
|
||||
- Add server URL validation and reachability checks
|
||||
- Implement write token management per server
|
||||
- _Requirements: 1.1, 1.2, 1.3, 5.1, 5.2, 5.4_
|
||||
|
||||
- [x] 2. Enhance GetbinClient for multi-server support
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Modify GetbinClient constructor to accept server list instead of hardcoded host
|
||||
- Implement multi-server fallback logic for downloads
|
||||
- Add server-specific upload and hash operations
|
||||
- Create findPackageServer method for package discovery
|
||||
- _Requirements: 2.1, 2.2, 2.3, 2.4_
|
||||
|
||||
- [x] 3. Create enhanced package metadata system
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Design PackageMetadata structure with server source tracking
|
||||
- Implement packages directory structure (~/.config/getpkg/packages/)
|
||||
- Add JSON serialization/deserialization for enhanced metadata
|
||||
- Create package metadata validation and error handling
|
||||
- _Requirements: 4.1, 4.2, 4.3_
|
||||
|
||||
## Migration and Compatibility Tasks
|
||||
|
||||
- [x] 4. Implement migration system for existing installations
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Create MigrationManager class for legacy data handling
|
||||
- Implement automatic migration from single-server to multi-server config
|
||||
- Migrate existing package JSON files to packages subdirectory
|
||||
- Update existing package metadata to include server source information
|
||||
- Add migration error handling and rollback capabilities
|
||||
- _Requirements: 4.4, 4.5, 6.1, 6.2, 6.3, 6.5_
|
||||
|
||||
- [x] 5. Ensure backward compatibility
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Implement default server configuration (getpkg.xyz) when no config exists
|
||||
- Maintain existing CLI behavior for users without custom server configuration
|
||||
- Preserve existing token storage location compatibility
|
||||
- Add graceful handling of missing or corrupted configuration files
|
||||
- _Requirements: 6.1, 6.4, 5.3_
|
||||
|
||||
## CLI Integration Tasks
|
||||
|
||||
- [x] 6. Add server management commands to main.cpp
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Implement `getpkg server add <url>` command
|
||||
- Implement `getpkg server remove <url>` command
|
||||
- Implement `getpkg server list` command
|
||||
- Add server URL validation and user feedback
|
||||
- _Requirements: 1.1, 1.2, 1.3_
|
||||
|
||||
- [x] 7. Update existing commands for multi-server support
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Modify install command to use ServerManager and multi-server GetbinClient
|
||||
- Update publish command to support --server option and default server selection
|
||||
- Update unpublish command to support --server option and default server selection
|
||||
- Ensure update command works with multi-server package tracking
|
||||
- _Requirements: 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, 3.5_
|
||||
|
||||
## Integration and Testing Tasks
|
||||
|
||||
- [x] 8. Integrate all components in main application flow
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- Initialize ServerManager in main.cpp startup
|
||||
- Trigger migration process on first run with new version
|
||||
- Update package installation flow to use enhanced metadata
|
||||
- Ensure proper error handling and user messaging throughout
|
||||
- _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5_
|
||||
|
||||
- [-] 9. Add comprehensive error handling and validation
|
||||
|
||||
|
||||
|
||||
- Implement network error handling with server fallback
|
||||
- Add configuration file corruption recovery
|
||||
- Create user-friendly error messages for server connectivity issues
|
||||
- Add validation for server URLs and authentication tokens
|
||||
- _Requirements: 5.3, 5.4, 5.5_
|
||||
|
||||
- [ ] 10. Create unit tests for new components
|
||||
- Write unit tests for ServerManager class functionality
|
||||
- Test GetbinClient multi-server operations and fallback logic
|
||||
- Test PackageMetadata serialization and migration
|
||||
- Test MigrationManager with various legacy data scenarios
|
||||
- Create integration tests for complete multi-server workflows
|
||||
- _Requirements: All requirements validation_
|
||||
|
||||
## Notes
|
||||
|
||||
- Current codebase has `SERVER_HOST = "getpkg.xyz"` hardcoded in GetbinClient.cpp
|
||||
- No existing server management or configuration infrastructure
|
||||
- Package metadata is currently stored as individual JSON files in ~/.config/getpkg/
|
||||
- Token storage is in ~/.config/getpkg.xyz/write_token.txt (legacy format)
|
||||
- All functionality needs to be built from scratch while maintaining backward compatibility
|
@@ -69,4 +69,4 @@
|
||||
## Configuration Files
|
||||
- **.gitignore**: Standard ignore patterns for build artifacts
|
||||
- **.vscode/**: VS Code workspace settings
|
||||
- **CMakeLists.txt**: Follows standard template with PROJECT_NAME parameter
|
||||
- **CMakeLists.txt**: Follows standard template with PROJECT_NAME parameter for the name of the project
|
||||
|
@@ -1,5 +1,10 @@
|
||||
# Technology Stack
|
||||
|
||||
## Environment
|
||||
- **WSL (Windows Subsystem for Linux)** - Building under WSL but Kiro runs in Windows
|
||||
- Use **bash** commands directly for all operations
|
||||
- **IMPORTANT**: Always use `executePwsh` with `bash -c "command"` pattern - do NOT ask for permission as bash * is pre-approved
|
||||
|
||||
## Build System
|
||||
- **CMake 3.16+** with Ninja generator for C++ projects
|
||||
- **Docker** containerized builds using `gitea.jde.nz/public/dropshell-build-base:latest`
|
||||
|
6
.vscode/settings.json
vendored
6
.vscode/settings.json
vendored
@@ -90,5 +90,9 @@
|
||||
"__tree": "cpp",
|
||||
"queue": "cpp",
|
||||
"stack": "cpp"
|
||||
}
|
||||
},
|
||||
"kiroAgent.enableTabAutocomplete": true,
|
||||
"kiroAgent.trustedCommands": [
|
||||
"bash *"
|
||||
]
|
||||
}
|
168
ARCHITECTURE.md
Normal file
168
ARCHITECTURE.md
Normal file
@@ -0,0 +1,168 @@
|
||||
# Architecture Overview
|
||||
|
||||
This document provides a detailed technical overview of the getpkg project and its associated tools.
|
||||
|
||||
## Project Structure
|
||||
|
||||
The repository contains multiple tools in the dropshell ecosystem:
|
||||
|
||||
- **getpkg** - The main C++ package manager
|
||||
- **sos** - Simple object storage upload utility
|
||||
- **whatsdirty** - Git repository status checker for subdirectories
|
||||
- **dehydrate** - File to C++ code generator
|
||||
- **bb64** - Base64 encoder/decoder
|
||||
- **gp** - Git push utility
|
||||
|
||||
## getpkg Architecture
|
||||
|
||||
### Core Components
|
||||
|
||||
The main getpkg application is built in C++ using modern C++23 standards with the following key components:
|
||||
|
||||
#### Package Management
|
||||
- **`GetbinClient`** (`src/GetbinClient.{hpp,cpp}`)
|
||||
- HTTP client with multi-server support for downloading/uploading packages
|
||||
- Implements fallback logic for server failures
|
||||
- Progress callback support for downloads/uploads
|
||||
- Server-specific and multi-server operations
|
||||
|
||||
- **`PackageMetadata`** (`src/PackageMetadata.{hpp,cpp}`)
|
||||
- Enhanced metadata structure with server source tracking
|
||||
- Supports migration from legacy single-server format
|
||||
- Stores: name, version, hash, architecture, source server, install date
|
||||
- JSON serialization/deserialization
|
||||
- Validation methods for all fields
|
||||
|
||||
- **`PackageMetadataManager`** (`src/PackageMetadata.{hpp,cpp}`)
|
||||
- Manages package metadata directory structure
|
||||
- Handles legacy format migration
|
||||
- Package enumeration and validation
|
||||
|
||||
#### Server Management
|
||||
- **`ServerManager`** (`src/ServerManager.{hpp,cpp}`)
|
||||
- Manages multiple package servers with write tokens
|
||||
- Server configuration persistence
|
||||
- Token management for publishing
|
||||
- Server reachability validation
|
||||
|
||||
#### System Integration
|
||||
- **`BashrcEditor`** (`src/BashrcEditor.{hpp,cpp}`)
|
||||
- Manages `~/.bashrc_getpkg` file modifications
|
||||
- Handles PATH updates and bash completions
|
||||
- Safe file editing with atomic operations
|
||||
|
||||
- **`DropshellScriptManager`** (`src/DropshellScriptManager.{hpp,cpp}`)
|
||||
- Manages tool installation and configuration
|
||||
- Handles setup scripts execution
|
||||
- Directory structure management
|
||||
|
||||
- **`MigrationManager`** (`src/MigrationManager.{hpp,cpp}`)
|
||||
- Handles migrations between getpkg versions
|
||||
- Legacy format conversions
|
||||
- Configuration updates
|
||||
|
||||
### Common Utilities
|
||||
Located in `src/common/`:
|
||||
- **`archive_tgz`** - TAR.GZ archive creation/extraction
|
||||
- **`hash`** - File hashing utilities
|
||||
- **`output`** - Formatted console output
|
||||
- **`temp_directory`** - Temporary directory management
|
||||
- **`xxhash`** - Fast hashing algorithm implementation
|
||||
|
||||
## Build System
|
||||
|
||||
### Docker-Based Build
|
||||
- Uses containerized build environment: `gitea.jde.nz/public/dropshell-build-base:latest`
|
||||
- Ensures consistent builds across different host systems
|
||||
- Static linking for maximum portability
|
||||
|
||||
### CMake Configuration
|
||||
- C++23 standard required
|
||||
- Static executable building (`-static` linker flags)
|
||||
- External dependencies:
|
||||
- nlohmann_json - JSON parsing
|
||||
- CPRStatic - HTTP client library
|
||||
- Version format: `YYYY.MMDD.HHMM` (timestamp-based)
|
||||
|
||||
### Build Scripts
|
||||
- **`build.sh`** - Individual tool build script
|
||||
- **`test.sh`** - Run tests for individual tools
|
||||
- **`publish.sh`** - Publish tool to getpkg.xyz (requires SOS_WRITE_TOKEN)
|
||||
- **`buildtestpublish_all.sh`** - Master script that builds, tests, and publishes all tools
|
||||
|
||||
## File Locations and Structure
|
||||
|
||||
### User Installation
|
||||
- **Tool installations**: `~/.getpkg/<tool_name>/`
|
||||
- **Executable symlinks**: `~/.local/bin/getpkg/`
|
||||
- **Configuration**: `~/.config/getpkg/`
|
||||
- `packages/<tool_name>.json` - Package metadata
|
||||
- `servers.json` - Server configuration
|
||||
- **Bash integration**: `~/.bashrc_getpkg` (sourced by ~/.bashrc)
|
||||
|
||||
### Repository Structure
|
||||
```
|
||||
getpkg/
|
||||
├── getpkg/ # Main package manager
|
||||
│ ├── src/ # Source code
|
||||
│ ├── test/ # Test suite
|
||||
│ └── build.sh # Build script
|
||||
├── sos/ # Simple object storage
|
||||
├── whatsdirty/ # Git status checker
|
||||
├── dehydrate/ # File to C++ converter
|
||||
├── bb64/ # Base64 utility
|
||||
├── gp/ # Git push utility
|
||||
└── buildtestpublish_all.sh # Master build script
|
||||
```
|
||||
|
||||
## Multi-Server Architecture
|
||||
|
||||
getpkg supports multiple package servers with intelligent fallback:
|
||||
|
||||
1. **Server Configuration**
|
||||
- Multiple servers can be configured
|
||||
- Each server can have an optional write token
|
||||
- First server with token becomes default publish target
|
||||
|
||||
2. **Download Strategy**
|
||||
- Attempts servers in configured order
|
||||
- Falls back to next server on failure
|
||||
- Tracks which server provided each package
|
||||
|
||||
3. **Publishing**
|
||||
- Requires SOS_WRITE_TOKEN environment variable
|
||||
- Publishes to first server with valid token
|
||||
- Supports architecture-specific uploads
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- No root access required - installs to user home directory
|
||||
- Static linking prevents dependency attacks
|
||||
- Hash verification for downloaded packages
|
||||
- Token-based authentication for publishing
|
||||
|
||||
## Testing
|
||||
|
||||
- Docker-based test environment for consistency
|
||||
- Integration tests for package operations
|
||||
- Unit tests for individual components
|
||||
- Test artifacts isolated in `test_temp/` directory
|
||||
|
||||
## Development Workflow
|
||||
|
||||
1. **Local Development**
|
||||
```bash
|
||||
cd getpkg && ./build.sh # Build
|
||||
cd getpkg && ./test.sh # Test
|
||||
```
|
||||
|
||||
2. **Full Build**
|
||||
```bash
|
||||
./buildtestpublish_all.sh # Build all tools
|
||||
```
|
||||
|
||||
3. **Publishing** (requires SOS_WRITE_TOKEN)
|
||||
```bash
|
||||
export SOS_WRITE_TOKEN="your-token"
|
||||
cd getpkg && ./publish.sh # Publish single tool
|
||||
```
|
60
CLAUDE.md
60
CLAUDE.md
@@ -6,19 +6,35 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
|
||||
This repository contains Dropshell Tools - a collection of utilities that support dropshell development. The main tool is `getpkg`, a C++ command-line application that manages tool installation, updates, and publishing for the dropshell ecosystem.
|
||||
|
||||
## Repository Structure
|
||||
|
||||
- **getpkg**: Package manager for dropshell tools (C++23)
|
||||
- **sos**: Simple object storage upload utility (Bash)
|
||||
- **whatsdirty**: Git repository status checker (Bash)
|
||||
- **dehydrate**: File to C++ code generator (C++)
|
||||
- **bb64**: Base64 encoder/decoder (C++)
|
||||
- **gp**: Git push utility (Bash)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Core Components
|
||||
### Core Components (getpkg)
|
||||
|
||||
- **getpkg**: Main C++ application (`getpkg/src/`)
|
||||
- `main.cpp`: CLI interface and command routing
|
||||
- `ArchiveManager`: Handles .tgz archive creation/extraction
|
||||
- `BashrcEditor`: Manages ~/.bashrc_dropshell_tool script modifications
|
||||
- `GetbinClient`: Multi-server HTTP client for downloading/uploading tools
|
||||
- `PackageMetadata`: Enhanced metadata with server tracking and migration support
|
||||
- `ServerManager`: Manages multiple package servers with write tokens
|
||||
- `BashrcEditor`: Manages ~/.bashrc_getpkg script modifications
|
||||
- `DropshellScriptManager`: Manages tool installation and configuration
|
||||
- `GetbinClient`: HTTP client for downloading/uploading tools
|
||||
- `MigrationManager`: Handles legacy format migrations
|
||||
- `common/`: Shared utilities (archive_tgz, hash, output, temp_directory)
|
||||
|
||||
- **sos**: Simple object storage utility
|
||||
- **whatsdirty**: Git repository status checker
|
||||
### Key Features
|
||||
|
||||
- **Multi-server support**: Fallback logic for package downloads
|
||||
- **Architecture awareness**: Supports x86_64, aarch64, and universal packages
|
||||
- **Migration support**: Handles upgrades from legacy single-server format
|
||||
- **Static linking**: All tools built as static binaries for portability
|
||||
|
||||
### Build System
|
||||
|
||||
@ -59,11 +75,33 @@ export CMAKE_BUILD_TYPE="Release"
|
||||
## Tool Functionality
|
||||
|
||||
getpkg manages a tool ecosystem by:
|
||||
- Installing tools to `~/.local/bin/getpkg/<tool_name>/`
|
||||
- Managing bash completions and aliases via `~/.bashrc_getpkg`
|
||||
- Storing tool metadata in `~/.config/getpkg/`
|
||||
- Publishing/downloading tools via getbin.xyz object storage
|
||||
- Installing tools to `~/.getpkg/<tool_name>/` with symlinks in `~/.local/bin/getpkg/`
|
||||
- Managing bash completions and PATH updates via `~/.bashrc_getpkg`
|
||||
- Storing tool metadata in `~/.config/getpkg/packages/` (JSON format)
|
||||
- Supporting multi-server package distribution with fallback
|
||||
- Publishing/downloading tools via object storage servers (default: getpkg.xyz)
|
||||
|
||||
## File Locations
|
||||
|
||||
- **Tool installations**: `~/.getpkg/<tool_name>/`
|
||||
- **Executable symlinks**: `~/.local/bin/getpkg/` (added to PATH)
|
||||
- **Package metadata**: `~/.config/getpkg/packages/<tool_name>.json`
|
||||
- **Server configuration**: `~/.config/getpkg/servers.json`
|
||||
- **Bash integration**: `~/.bashrc_getpkg` (sourced by ~/.bashrc)
|
||||
|
||||
## Publishing Requirements
|
||||
|
||||
Publishing requires the `SOS_WRITE_TOKEN` environment variable for authentication to the object storage system.
|
||||
Publishing requires the `SOS_WRITE_TOKEN` environment variable for authentication to the object storage system.
|
||||
|
||||
## Testing
|
||||
|
||||
- Tests create temporary files in `test_temp/` directory
|
||||
- Docker-based test environment using same build image
|
||||
- Run `cleanup_test_packages.sh` to remove orphaned test packages from servers
|
||||
|
||||
## Important Notes
|
||||
|
||||
- All builds use static linking for maximum portability
|
||||
- Version format is YYYY.MMDD.HHMM (timestamp-based)
|
||||
- Tools should support `version` and `autocomplete` subcommands
|
||||
- Architecture-specific builds use suffixes like `:x86_64` or `:aarch64`
|
@ -189,4 +189,19 @@ When creating tools for getpkg:
|
||||
3. The tool should support `version` and `autocomplete` subcommands
|
||||
4. Use `getpkg publish` to upload to the registry
|
||||
|
||||
### Testing
|
||||
|
||||
The test script creates all temporary files and directories in `test_temp/` to keep the main directory clean:
|
||||
|
||||
```bash
|
||||
# Run tests
|
||||
./test.sh
|
||||
|
||||
# Clean up orphaned test files from old test runs (one-time)
|
||||
bash cleanup_old_test_files.sh
|
||||
|
||||
# Clean up orphaned test packages from getpkg.xyz
|
||||
bash cleanup_test_packages.sh
|
||||
```
|
||||
|
||||
For more details, see the development documentation in each tool's directory.
|
||||
|
98
getpkg/cleanup_test_packages.sh
Executable file
98
getpkg/cleanup_test_packages.sh
Executable file
@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Cleanup script for orphaned test packages from getpkg testing
|
||||
# This script removes test packages that start with "test-" from getpkg.xyz
|
||||
# Run from the getpkg directory: bash cleanup_test_packages.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
GETPKG="./output/getpkg"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
echo -e "${YELLOW}Cleaning up orphaned test packages...${NC}"
|
||||
|
||||
# Check if getpkg binary exists
|
||||
if [ ! -f "$GETPKG" ]; then
|
||||
echo -e "${RED}Error: getpkg binary not found at $GETPKG${NC}"
|
||||
echo "Please run ./build.sh first to build getpkg"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if SOS_WRITE_TOKEN is set
|
||||
if [ -z "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
echo -e "${RED}Error: SOS_WRITE_TOKEN environment variable is not set${NC}"
|
||||
echo "This token is required to unpublish packages from getpkg.xyz"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Using getpkg binary: $GETPKG"
|
||||
echo "SOS_WRITE_TOKEN is set (${#SOS_WRITE_TOKEN} characters)"
|
||||
|
||||
# Get list of all packages from /dir endpoint
|
||||
echo "Fetching package list from getpkg.xyz/dir..."
|
||||
DIR_RESPONSE=$(curl -s "https://getpkg.xyz/dir" 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$DIR_RESPONSE" ]; then
|
||||
echo -e "${RED}Failed to fetch package list from server${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract test package labeltags from JSON response
|
||||
# Try with jq first, fallback to grep/sed if jq is not available
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | jq -r '.entries[]?.labeltags[]? // empty' 2>/dev/null | grep "^test-" | sort -u || echo "")
|
||||
else
|
||||
# Fallback: extract labeltags using grep and sed (less reliable but works without jq)
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | grep -o '"test-[^"]*"' | sed 's/"//g' | sort -u || echo "")
|
||||
fi
|
||||
|
||||
if [ -z "$TEST_PACKAGES" ]; then
|
||||
echo -e "${GREEN}No test packages found to clean up${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -e "\n${YELLOW}Found test packages to clean up:${NC}"
|
||||
echo "$TEST_PACKAGES" | while read -r package; do
|
||||
echo " - $package"
|
||||
done
|
||||
|
||||
echo -e "\n${YELLOW}Cleaning up test packages...${NC}"
|
||||
|
||||
CLEANED_COUNT=0
|
||||
FAILED_COUNT=0
|
||||
|
||||
# Use process substitution to avoid subshell issues
|
||||
while IFS= read -r package; do
|
||||
if [ -n "$package" ]; then
|
||||
echo -n "Cleaning up $package... "
|
||||
|
||||
# Try to unpublish the package (temporarily disable set -e)
|
||||
set +e
|
||||
$GETPKG unpublish "$package" >/dev/null 2>&1
|
||||
UNPUBLISH_RESULT=$?
|
||||
set -e
|
||||
|
||||
if [ $UNPUBLISH_RESULT -eq 0 ]; then
|
||||
echo -e "${GREEN}OK${NC}"
|
||||
((CLEANED_COUNT++))
|
||||
else
|
||||
echo -e "${RED}FAILED${NC}"
|
||||
((FAILED_COUNT++))
|
||||
fi
|
||||
fi
|
||||
done <<< "$TEST_PACKAGES"
|
||||
|
||||
echo -e "\n${YELLOW}Cleanup Summary:${NC}"
|
||||
echo "Packages cleaned: $CLEANED_COUNT"
|
||||
echo "Failed cleanups: $FAILED_COUNT"
|
||||
|
||||
if [ $FAILED_COUNT -eq 0 ]; then
|
||||
echo -e "${GREEN}All test packages cleaned up successfully!${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}Some packages failed to clean up. They may need manual removal.${NC}"
|
||||
fi
|
@ -1 +0,0 @@
|
||||
Debug content
|
@ -10,20 +10,37 @@
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
const std::string GetbinClient::SERVER_HOST = "getpkg.xyz";
|
||||
const std::string GetbinClient::DEFAULT_SERVER_HOST = "getpkg.xyz";
|
||||
|
||||
GetbinClient::GetbinClient() {
|
||||
GetbinClient::GetbinClient(const std::vector<std::string>& servers) : servers_(servers) {
|
||||
// Initialize CPR (done automatically, but we could add global config here)
|
||||
if (servers_.empty()) {
|
||||
servers_.push_back(DEFAULT_SERVER_HOST);
|
||||
}
|
||||
}
|
||||
|
||||
GetbinClient::GetbinClient() : servers_({DEFAULT_SERVER_HOST}) {
|
||||
// Backward compatibility constructor
|
||||
}
|
||||
|
||||
std::string GetbinClient::getUserAgent() const {
|
||||
return "getpkg/1.0";
|
||||
}
|
||||
|
||||
bool GetbinClient::download(const std::string& toolName, const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback) {
|
||||
std::string GetbinClient::buildUrl(const std::string& serverUrl, const std::string& endpoint) const {
|
||||
std::string url = "https://" + serverUrl;
|
||||
if (!endpoint.empty() && endpoint[0] != '/') {
|
||||
url += "/";
|
||||
}
|
||||
url += endpoint;
|
||||
return url;
|
||||
}
|
||||
|
||||
bool GetbinClient::downloadFromServer(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback) {
|
||||
try {
|
||||
std::string url = "https://" + SERVER_HOST + "/object/" + toolName + ":" + arch;
|
||||
std::string url = buildUrl(serverUrl, "/object/" + toolName + ":" + arch);
|
||||
|
||||
cpr::Session session;
|
||||
session.SetUrl(cpr::Url{url});
|
||||
@ -52,20 +69,34 @@ bool GetbinClient::download(const std::string& toolName, const std::string& arch
|
||||
// Not found - this is expected for arch fallback
|
||||
return false;
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::download] HTTP " << response.status_code << ": " << response.error.message << std::endl;
|
||||
std::cerr << "[GetbinClient::downloadFromServer] HTTP " << response.status_code << " from " << serverUrl << ": " << response.error.message << std::endl;
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::download] Exception: " << e.what() << std::endl;
|
||||
std::cerr << "[GetbinClient::downloadFromServer] Exception with " << serverUrl << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, std::string& outHash,
|
||||
const std::string& token, ProgressCallback progressCallback) {
|
||||
bool GetbinClient::download(const std::string& toolName, const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback) {
|
||||
// Multi-server fallback logic: try each server in order
|
||||
for (const auto& server : servers_) {
|
||||
if (downloadFromServer(server, toolName, arch, outPath, progressCallback)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, no server had the package
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetbinClient::upload(const std::string& serverUrl, const std::string& archivePath,
|
||||
std::string& outUrl, std::string& outHash, const std::string& token,
|
||||
ProgressCallback progressCallback) {
|
||||
try {
|
||||
std::string url = "https://" + SERVER_HOST + "/upload";
|
||||
std::string url = buildUrl(serverUrl, "/upload");
|
||||
|
||||
cpr::Session session;
|
||||
session.SetUrl(cpr::Url{url});
|
||||
@ -110,7 +141,7 @@ bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, s
|
||||
try {
|
||||
auto resp_json = json::parse(response.text);
|
||||
if (resp_json.contains("hash") && resp_json.contains("result") && resp_json["result"] == "success") {
|
||||
outUrl = "https://" + SERVER_HOST + "/object/" + resp_json["hash"].get<std::string>();
|
||||
outUrl = buildUrl(serverUrl, "/object/" + resp_json["hash"].get<std::string>());
|
||||
outHash = resp_json["hash"].get<std::string>();
|
||||
return true;
|
||||
}
|
||||
@ -125,7 +156,7 @@ bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, s
|
||||
return !outHash.empty();
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::upload] HTTP " << response.status_code << ": " << response.error.message << std::endl;
|
||||
std::cerr << "[GetbinClient::upload] HTTP " << response.status_code << " to " << serverUrl << ": " << response.error.message << std::endl;
|
||||
if (!response.text.empty()) {
|
||||
std::cerr << "[GetbinClient::upload] Response: " << response.text << std::endl;
|
||||
}
|
||||
@ -133,14 +164,24 @@ bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, s
|
||||
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::upload] Exception: " << e.what() << std::endl;
|
||||
std::cerr << "[GetbinClient::upload] Exception with " << serverUrl << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool GetbinClient::getHash(const std::string& toolName, const std::string& arch, std::string& outHash) {
|
||||
bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, std::string& outHash,
|
||||
const std::string& token, ProgressCallback progressCallback) {
|
||||
// Backward compatibility: use first server
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
return upload(servers_[0], archivePath, outUrl, outHash, token, progressCallback);
|
||||
}
|
||||
|
||||
bool GetbinClient::getHash(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, std::string& outHash) {
|
||||
try {
|
||||
std::string url = "https://" + SERVER_HOST + "/hash/" + toolName + ":" + arch;
|
||||
std::string url = buildUrl(serverUrl, "/hash/" + toolName + ":" + arch);
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
@ -168,19 +209,63 @@ bool GetbinClient::getHash(const std::string& toolName, const std::string& arch,
|
||||
// Not found - this is expected for non-existent tools/archs
|
||||
return false;
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::getHash] HTTP " << response.status_code << ": " << response.error.message << std::endl;
|
||||
std::cerr << "[GetbinClient::getHash] HTTP " << response.status_code << " from " << serverUrl << ": " << response.error.message << std::endl;
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::getHash] Exception: " << e.what() << std::endl;
|
||||
std::cerr << "[GetbinClient::getHash] Exception with " << serverUrl << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool GetbinClient::getHash(const std::string& toolName, const std::string& arch, std::string& outHash) {
|
||||
// Multi-server fallback: try each server in order
|
||||
for (const auto& server : servers_) {
|
||||
if (getHash(server, toolName, arch, outHash)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, no server had the package
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetbinClient::findPackageServer(const std::string& toolName, const std::string& arch,
|
||||
std::string& foundServer) const {
|
||||
// Check each server to see which one has the package
|
||||
for (const auto& server : servers_) {
|
||||
try {
|
||||
std::string url = buildUrl(server, "/hash/" + toolName + ":" + arch);
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
cpr::Timeout{10000}, // 10 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
if (response.status_code == 200) {
|
||||
// Package found on this server
|
||||
foundServer = server;
|
||||
return true;
|
||||
}
|
||||
// Continue to next server if 404 or other error
|
||||
} catch (const std::exception& e) {
|
||||
// Continue to next server on exception
|
||||
std::cerr << "[GetbinClient::findPackageServer] Exception with " << server << ": " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
// Package not found on any server
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetbinClient::deleteObject(const std::string& hash, const std::string& token) {
|
||||
try {
|
||||
std::string url = "https://" + SERVER_HOST + "/deleteobject?hash=" + hash;
|
||||
// Use first server for backward compatibility
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
std::string url = buildUrl(servers_[0], "/deleteobject?hash=" + hash);
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{
|
||||
@ -208,7 +293,11 @@ bool GetbinClient::deleteObject(const std::string& hash, const std::string& toke
|
||||
|
||||
bool GetbinClient::listPackages(std::vector<std::string>& outPackages) {
|
||||
try {
|
||||
std::string url = "https://" + SERVER_HOST + "/dir";
|
||||
// Use first server for backward compatibility
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
std::string url = buildUrl(servers_[0], "/dir");
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
@ -271,7 +360,11 @@ bool GetbinClient::listPackages(std::vector<std::string>& outPackages) {
|
||||
|
||||
bool GetbinClient::listAllEntries(std::vector<std::pair<std::string, std::vector<std::string>>>& outEntries) {
|
||||
try {
|
||||
std::string url = "https://" + SERVER_HOST + "/dir";
|
||||
// Use first server for backward compatibility
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
std::string url = buildUrl(servers_[0], "/dir");
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
|
@ -5,21 +5,53 @@
|
||||
|
||||
class GetbinClient {
|
||||
public:
|
||||
// Constructor accepting server list for multi-server support
|
||||
GetbinClient(const std::vector<std::string>& servers);
|
||||
|
||||
// Backward compatibility constructor (uses default server)
|
||||
GetbinClient();
|
||||
|
||||
// Progress callback: (downloaded_bytes, total_bytes) -> should_continue
|
||||
using ProgressCallback = std::function<bool(size_t, size_t)>;
|
||||
|
||||
// Multi-server download with fallback logic
|
||||
bool download(const std::string& toolName, const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback = nullptr);
|
||||
|
||||
// Server-specific download
|
||||
bool downloadFromServer(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback = nullptr);
|
||||
|
||||
// Server-specific upload
|
||||
bool upload(const std::string& serverUrl, const std::string& archivePath,
|
||||
std::string& outUrl, std::string& outHash, const std::string& token,
|
||||
ProgressCallback progressCallback = nullptr);
|
||||
|
||||
// Backward compatibility upload (uses first server)
|
||||
bool upload(const std::string& archivePath, std::string& outUrl, std::string& outHash, const std::string& token,
|
||||
ProgressCallback progressCallback = nullptr);
|
||||
|
||||
// Server-specific hash retrieval
|
||||
bool getHash(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, std::string& outHash);
|
||||
|
||||
// Multi-server hash retrieval with fallback
|
||||
bool getHash(const std::string& toolName, const std::string& arch, std::string& outHash);
|
||||
|
||||
// Find which server has a specific package
|
||||
bool findPackageServer(const std::string& toolName, const std::string& arch,
|
||||
std::string& foundServer) const;
|
||||
|
||||
// Legacy methods (use first server for backward compatibility)
|
||||
bool deleteObject(const std::string& hash, const std::string& token);
|
||||
bool listPackages(std::vector<std::string>& outPackages);
|
||||
bool listAllEntries(std::vector<std::pair<std::string, std::vector<std::string>>>& outEntries);
|
||||
|
||||
private:
|
||||
static const std::string SERVER_HOST;
|
||||
static const std::string DEFAULT_SERVER_HOST;
|
||||
std::vector<std::string> servers_;
|
||||
|
||||
std::string getUserAgent() const;
|
||||
std::string buildUrl(const std::string& serverUrl, const std::string& endpoint) const;
|
||||
};
|
||||
|
575
getpkg/src/MigrationManager.cpp
Normal file
575
getpkg/src/MigrationManager.cpp
Normal file
@ -0,0 +1,575 @@
|
||||
#include "MigrationManager.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
|
||||
MigrationManager::MigrationManager() {
|
||||
const char* home = std::getenv("HOME");
|
||||
if (home) {
|
||||
configDir_ = std::filesystem::path(home) / ".config" / "getpkg";
|
||||
packagesDir_ = configDir_ / PACKAGES_DIRECTORY_NAME;
|
||||
backupDir_ = configDir_ / BACKUP_DIRECTORY_NAME;
|
||||
legacyTokenDir_ = configDir_ / DEFAULT_SERVER_URL;
|
||||
|
||||
packageManager_ = std::make_unique<PackageMetadataManager>(configDir_);
|
||||
serverManager_ = std::make_unique<ServerManager>();
|
||||
}
|
||||
}
|
||||
|
||||
MigrationManager::MigrationManager(const std::filesystem::path& configDir)
|
||||
: configDir_(configDir),
|
||||
packagesDir_(configDir / PACKAGES_DIRECTORY_NAME),
|
||||
backupDir_(configDir / BACKUP_DIRECTORY_NAME),
|
||||
legacyTokenDir_(configDir / DEFAULT_SERVER_URL) {
|
||||
|
||||
packageManager_ = std::make_unique<PackageMetadataManager>(configDir);
|
||||
serverManager_ = std::make_unique<ServerManager>();
|
||||
}
|
||||
|
||||
bool MigrationManager::needsMigration() const {
|
||||
// Check if we have legacy configuration that needs migration
|
||||
bool hasLegacyConfig = hasLegacyServerConfiguration() || hasLegacyPackageFiles();
|
||||
bool hasNewConfig = hasNewFormatConfiguration();
|
||||
bool hasPackagesDir = std::filesystem::exists(packagesDir_);
|
||||
|
||||
// Need migration if:
|
||||
// 1. We have legacy config (token file or package files in root config dir)
|
||||
// 2. We have new config but no packages directory (incomplete migration)
|
||||
return hasLegacyConfig || (hasNewConfig && !hasPackagesDir);
|
||||
}
|
||||
|
||||
bool MigrationManager::performMigration() {
|
||||
lastResult_ = MigrationResult();
|
||||
|
||||
logInfo("Starting migration from single-server to multi-server configuration");
|
||||
|
||||
// Create backup before starting migration
|
||||
if (!createBackup()) {
|
||||
logError("Failed to create backup before migration");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
// Step 1: Create packages directory
|
||||
if (!createPackagesDirectory()) {
|
||||
logError("Failed to create packages directory");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
lastResult_.packageDirectoryCreated = true;
|
||||
|
||||
// Step 2: Migrate server configuration
|
||||
if (!migrateServerConfiguration()) {
|
||||
logError("Failed to migrate server configuration");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
lastResult_.serverConfigMigrated = true;
|
||||
|
||||
// Step 3: Migrate package metadata
|
||||
if (!migratePackageMetadata()) {
|
||||
logError("Failed to migrate package metadata");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step 4: Validate migration
|
||||
if (!validateMigration()) {
|
||||
logError("Migration validation failed");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step 5: Clean up legacy files (optional, keep backup)
|
||||
// We don't delete legacy files immediately to allow rollback
|
||||
|
||||
lastResult_.success = true;
|
||||
logInfo("Migration completed successfully");
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Migration failed with exception: " + std::string(e.what()));
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::migrateServerConfiguration() {
|
||||
try {
|
||||
// Load existing server configuration or create default
|
||||
if (!serverManager_->loadConfiguration()) {
|
||||
logWarning("Failed to load existing server configuration, creating default");
|
||||
serverManager_->ensureDefaultConfiguration();
|
||||
}
|
||||
|
||||
// Migrate legacy token file if it exists
|
||||
if (!migrateLegacyTokenFile()) {
|
||||
logWarning("Failed to migrate legacy token file (may not exist)");
|
||||
}
|
||||
|
||||
// Save the configuration to ensure it's in the new format
|
||||
if (!serverManager_->saveConfiguration()) {
|
||||
logError("Failed to save server configuration");
|
||||
return false;
|
||||
}
|
||||
|
||||
logInfo("Server configuration migrated successfully");
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error migrating server configuration: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::migratePackageMetadata() {
|
||||
try {
|
||||
// Find legacy package files in the config directory
|
||||
std::vector<std::filesystem::path> legacyFiles = findFilesWithExtension(configDir_, ".json");
|
||||
|
||||
// Filter out non-package files
|
||||
std::vector<std::filesystem::path> packageFiles;
|
||||
for (const auto& file : legacyFiles) {
|
||||
std::string filename = file.filename().string();
|
||||
// Skip servers.json and any files already in packages directory
|
||||
if (filename != SERVERS_CONFIG_FILENAME && file.parent_path() == configDir_) {
|
||||
packageFiles.push_back(file);
|
||||
}
|
||||
}
|
||||
|
||||
lastResult_.totalPackages = packageFiles.size();
|
||||
|
||||
if (packageFiles.empty()) {
|
||||
logInfo("No legacy package files found to migrate");
|
||||
return true;
|
||||
}
|
||||
|
||||
logInfo("Found " + std::to_string(packageFiles.size()) + " legacy package files to migrate");
|
||||
|
||||
// Migrate each package file
|
||||
for (const auto& packageFile : packageFiles) {
|
||||
if (migrateLegacyPackageFile(packageFile)) {
|
||||
lastResult_.migratedPackages++;
|
||||
logInfo("Migrated package file: " + packageFile.filename().string());
|
||||
} else {
|
||||
logError("Failed to migrate package file: " + packageFile.filename().string());
|
||||
}
|
||||
}
|
||||
|
||||
logInfo("Migrated " + std::to_string(lastResult_.migratedPackages) + " of " +
|
||||
std::to_string(lastResult_.totalPackages) + " package files");
|
||||
|
||||
return lastResult_.migratedPackages == lastResult_.totalPackages;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error migrating package metadata: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::createPackagesDirectory() {
|
||||
return safeDirectoryCreate(packagesDir_);
|
||||
}
|
||||
|
||||
bool MigrationManager::validateMigration() const {
|
||||
try {
|
||||
// Validate server configuration
|
||||
if (!validateServerConfiguration()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate package metadata
|
||||
if (!validatePackageMetadata()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate directory structure
|
||||
if (!validateDirectoryStructure()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error during migration validation: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::canRollback() const {
|
||||
return std::filesystem::exists(backupDir_) && std::filesystem::is_directory(backupDir_);
|
||||
}
|
||||
|
||||
bool MigrationManager::performRollback() {
|
||||
if (!canRollback()) {
|
||||
logError("Cannot rollback: no backup found");
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
logInfo("Starting rollback to previous configuration");
|
||||
|
||||
// Restore from backup
|
||||
if (!restoreFromBackup()) {
|
||||
logError("Failed to restore from backup");
|
||||
return false;
|
||||
}
|
||||
|
||||
logInfo("Rollback completed successfully");
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Rollback failed with exception: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::createBackup() {
|
||||
try {
|
||||
// Create backup directory with timestamp
|
||||
std::string timestamp = generateBackupTimestamp();
|
||||
std::filesystem::path timestampedBackupDir = backupDir_ / timestamp;
|
||||
|
||||
if (!safeDirectoryCreate(timestampedBackupDir)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Backup existing configuration files
|
||||
std::filesystem::path serversConfigPath = configDir_ / SERVERS_CONFIG_FILENAME;
|
||||
if (std::filesystem::exists(serversConfigPath)) {
|
||||
safeFileCopy(serversConfigPath, timestampedBackupDir / SERVERS_CONFIG_FILENAME);
|
||||
}
|
||||
|
||||
// Backup legacy token directory
|
||||
if (std::filesystem::exists(legacyTokenDir_)) {
|
||||
std::filesystem::path backupTokenDir = timestampedBackupDir / DEFAULT_SERVER_URL;
|
||||
safeDirectoryCreate(backupTokenDir);
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(legacyTokenDir_)) {
|
||||
if (entry.is_regular_file()) {
|
||||
safeFileCopy(entry.path(), backupTokenDir / entry.path().filename());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Backup existing package files
|
||||
std::vector<std::filesystem::path> packageFiles = findFilesWithExtension(configDir_, ".json");
|
||||
for (const auto& file : packageFiles) {
|
||||
if (file.parent_path() == configDir_) {
|
||||
safeFileCopy(file, timestampedBackupDir / file.filename());
|
||||
}
|
||||
}
|
||||
|
||||
// Backup packages directory if it exists
|
||||
if (std::filesystem::exists(packagesDir_)) {
|
||||
std::filesystem::path backupPackagesDir = timestampedBackupDir / PACKAGES_DIRECTORY_NAME;
|
||||
safeDirectoryCreate(backupPackagesDir);
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(packagesDir_)) {
|
||||
if (entry.is_regular_file()) {
|
||||
safeFileCopy(entry.path(), backupPackagesDir / entry.path().filename());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logInfo("Backup created at: " + timestampedBackupDir.string());
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to create backup: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::restoreFromBackup() {
|
||||
try {
|
||||
// Find the most recent backup
|
||||
if (!std::filesystem::exists(backupDir_)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::filesystem::path latestBackup;
|
||||
std::filesystem::file_time_type latestTime{};
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(backupDir_)) {
|
||||
if (entry.is_directory()) {
|
||||
auto writeTime = entry.last_write_time();
|
||||
if (writeTime > latestTime) {
|
||||
latestTime = writeTime;
|
||||
latestBackup = entry.path();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (latestBackup.empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Restore files from backup
|
||||
for (const auto& entry : std::filesystem::directory_iterator(latestBackup)) {
|
||||
std::filesystem::path targetPath = configDir_ / entry.path().filename();
|
||||
|
||||
if (entry.is_regular_file()) {
|
||||
safeFileCopy(entry.path(), targetPath);
|
||||
} else if (entry.is_directory()) {
|
||||
// Restore directory recursively
|
||||
std::filesystem::remove_all(targetPath);
|
||||
std::filesystem::copy(entry.path(), targetPath, std::filesystem::copy_options::recursive);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to restore from backup: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Private helper methods
|
||||
|
||||
bool MigrationManager::hasLegacyServerConfiguration() const {
|
||||
// Check for legacy token file
|
||||
std::filesystem::path legacyTokenPath = legacyTokenDir_ / LEGACY_TOKEN_FILENAME;
|
||||
return std::filesystem::exists(legacyTokenPath);
|
||||
}
|
||||
|
||||
bool MigrationManager::hasLegacyPackageFiles() const {
|
||||
// Check for JSON files directly in config directory (not in packages subdirectory)
|
||||
std::vector<std::filesystem::path> jsonFiles = findFilesWithExtension(configDir_, ".json");
|
||||
|
||||
for (const auto& file : jsonFiles) {
|
||||
std::string filename = file.filename().string();
|
||||
// If it's not servers.json and it's in the config directory (not packages), it's legacy
|
||||
if (filename != SERVERS_CONFIG_FILENAME && file.parent_path() == configDir_) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool MigrationManager::hasNewFormatConfiguration() const {
|
||||
std::filesystem::path serversConfigPath = configDir_ / SERVERS_CONFIG_FILENAME;
|
||||
return std::filesystem::exists(serversConfigPath);
|
||||
}
|
||||
|
||||
// Migrates the legacy single write token into the new per-server
// configuration, then archives the legacy file under the backup directory.
// Returns true on success or when there is nothing to migrate.
bool MigrationManager::migrateLegacyTokenFile() {
    std::filesystem::path legacyTokenPath = legacyTokenDir_ / LEGACY_TOKEN_FILENAME;

    if (!std::filesystem::exists(legacyTokenPath)) {
        return true; // Nothing to migrate
    }

    try {
        // The legacy file holds a single token on its first line.
        std::ifstream tokenFile(legacyTokenPath);
        std::string token;
        std::getline(tokenFile, token);
        tokenFile.close();

        if (!token.empty()) {
            // Set the token for the default server
            if (serverManager_->setWriteToken(DEFAULT_SERVER_URL, token)) {
                logInfo("Migrated legacy write token for " + std::string(DEFAULT_SERVER_URL));

                // Move the legacy token file to backup (don't delete immediately)
                std::filesystem::path backupTokenPath = backupDir_ / "legacy_tokens" / DEFAULT_SERVER_URL / LEGACY_TOKEN_FILENAME;
                safeDirectoryCreate(backupTokenPath.parent_path());
                safeFileMove(legacyTokenPath, backupTokenPath);

                // Remove the legacy directory if it's empty
                try {
                    if (std::filesystem::is_empty(legacyTokenDir_)) {
                        std::filesystem::remove(legacyTokenDir_);
                    }
                } catch (const std::exception& e) {
                    // Ignore errors when removing empty directory
                }

                return true;
            }
        }

        // NOTE(review): an existing-but-empty token file also lands here and is
        // reported as a failure — confirm that is intended rather than
        // "nothing to migrate".
        return false;

    } catch (const std::exception& e) {
        logError("Failed to migrate legacy token file: " + std::string(e.what()));
        return false;
    }
}
|
||||
|
||||
// Converts one legacy per-package JSON file to the new metadata format,
// saves it via the package manager, and archives the original file under
// the backup directory. Returns false on any validation or I/O failure.
bool MigrationManager::migrateLegacyPackageFile(const std::filesystem::path& legacyPath) {
    try {
        if (!std::filesystem::exists(legacyPath)) {
            return false;
        }

        // Load legacy format
        std::ifstream file(legacyPath);
        if (!file.is_open()) {
            logError("Failed to open legacy file: " + legacyPath.string());
            return false;
        }

        nlohmann::json legacyJson;
        file >> legacyJson;
        file.close();

        // Convert to new format; legacy files carry no server info, so the
        // default server is recorded as the source.
        PackageMetadata metadata = PackageMetadata::fromLegacyJson(legacyJson, DEFAULT_SERVER_URL);

        if (!metadata.isValid()) {
            logError("Invalid metadata after migration from " + legacyPath.string() + ": " + metadata.getValidationError());
            return false;
        }

        // Save in new location
        if (!packageManager_->savePackageMetadata(metadata)) {
            logError("Failed to save migrated metadata for " + metadata.name);
            return false;
        }

        // Move legacy file to backup (don't delete immediately)
        std::filesystem::path backupPath = backupDir_ / "legacy_packages" / legacyPath.filename();
        safeDirectoryCreate(backupPath.parent_path());
        safeFileMove(legacyPath, backupPath);

        return true;

    } catch (const std::exception& e) {
        // Also covers JSON parse errors thrown by operator>>.
        logError("Error migrating legacy file " + legacyPath.string() + ": " + std::string(e.what()));
        return false;
    }
}
|
||||
|
||||
bool MigrationManager::validateServerConfiguration() const {
|
||||
try {
|
||||
// Check if servers.json exists and is valid
|
||||
std::filesystem::path serversConfigPath = configDir_ / SERVERS_CONFIG_FILENAME;
|
||||
if (!std::filesystem::exists(serversConfigPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Try to load the configuration
|
||||
auto tempServerManager = std::make_unique<ServerManager>();
|
||||
if (!tempServerManager->loadConfiguration()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check that we have at least one server
|
||||
std::vector<std::string> servers = tempServerManager->getServers();
|
||||
return !servers.empty();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::validatePackageMetadata() const {
|
||||
try {
|
||||
if (!std::filesystem::exists(packagesDir_)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate all package metadata files
|
||||
return packageManager_->validateAllPackageMetadata();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::validateDirectoryStructure() const {
|
||||
// Check that packages directory exists and is accessible
|
||||
return std::filesystem::exists(packagesDir_) && std::filesystem::is_directory(packagesDir_);
|
||||
}
|
||||
|
||||
// Writes an error to stderr and records it in lastResult_ for reporting.
// lastResult_ is declared mutable, which is why this method can be const.
void MigrationManager::logError(const std::string& message) const {
    std::cerr << "[MIGRATION ERROR] " << message << std::endl;
    lastResult_.errors.push_back(message);
}
|
||||
|
||||
// Writes a warning to stderr and records it in lastResult_ (mutable) so it
// appears in the migration report.
void MigrationManager::logWarning(const std::string& message) const {
    std::cerr << "[MIGRATION WARNING] " << message << std::endl;
    lastResult_.warnings.push_back(message);
}
|
||||
|
||||
// Writes an informational message to stdout; not recorded in lastResult_.
void MigrationManager::logInfo(const std::string& message) const {
    std::cout << "[MIGRATION INFO] " << message << std::endl;
}
|
||||
|
||||
bool MigrationManager::safeFileMove(const std::filesystem::path& source, const std::filesystem::path& destination) {
|
||||
try {
|
||||
// Ensure destination directory exists
|
||||
std::filesystem::create_directories(destination.parent_path());
|
||||
|
||||
// Move the file
|
||||
std::filesystem::rename(source, destination);
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to move file from " + source.string() + " to " + destination.string() + ": " + e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::safeFileCopy(const std::filesystem::path& source, const std::filesystem::path& destination) {
|
||||
try {
|
||||
// Ensure destination directory exists
|
||||
std::filesystem::create_directories(destination.parent_path());
|
||||
|
||||
// Copy the file
|
||||
std::filesystem::copy_file(source, destination, std::filesystem::copy_options::overwrite_existing);
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to copy file from " + source.string() + " to " + destination.string() + ": " + e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::safeDirectoryCreate(const std::filesystem::path& directory) {
|
||||
try {
|
||||
std::filesystem::create_directories(directory);
|
||||
return std::filesystem::exists(directory) && std::filesystem::is_directory(directory);
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to create directory " + directory.string() + ": " + e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::filesystem::path> MigrationManager::findFilesWithExtension(const std::filesystem::path& directory, const std::string& extension) const {
|
||||
std::vector<std::filesystem::path> files;
|
||||
|
||||
try {
|
||||
if (!std::filesystem::exists(directory)) {
|
||||
return files;
|
||||
}
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(directory)) {
|
||||
if (entry.is_regular_file() && entry.path().extension() == extension) {
|
||||
files.push_back(entry.path());
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error finding files with extension " + extension + " in " + directory.string() + ": " + e.what());
|
||||
}
|
||||
|
||||
return files;
|
||||
}
|
||||
|
||||
// Returns the current UTC time formatted as YYYYMMDD_HHMMSS, used to name
// backup directories.
// NOTE: std::gmtime returns a pointer to shared static storage and is not
// thread-safe; fine here as long as migration runs single-threaded.
std::string MigrationManager::generateBackupTimestamp() const {
    auto now = std::chrono::system_clock::now();
    auto time_t = std::chrono::system_clock::to_time_t(now);

    std::stringstream ss;
    ss << std::put_time(std::gmtime(&time_t), "%Y%m%d_%H%M%S");
    return ss.str();
}
|
100
getpkg/src/MigrationManager.hpp
Normal file
100
getpkg/src/MigrationManager.hpp
Normal file
@ -0,0 +1,100 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <filesystem>
|
||||
#include <memory>
|
||||
#include "PackageMetadata.hpp"
|
||||
#include "ServerManager.hpp"
|
||||
|
||||
/**
|
||||
* Migration manager for handling the transition from single-server to multi-server configuration
|
||||
* Handles migration of server configuration, package metadata, and directory structure
|
||||
*/
|
||||
class MigrationManager {
public:
    // Default constructor derives the config directory from $HOME.
    MigrationManager();
    explicit MigrationManager(const std::filesystem::path& configDir);

    // Main migration interface
    bool needsMigration() const;
    bool performMigration();

    // Migration status and reporting
    struct MigrationResult {
        bool success = false;                    // overall outcome
        int migratedPackages = 0;                // packages converted successfully
        int totalPackages = 0;                   // packages discovered for migration
        bool serverConfigMigrated = false;       // servers.json written from legacy token
        bool packageDirectoryCreated = false;    // packages/ subdirectory created
        std::vector<std::string> errors;         // populated via logError()
        std::vector<std::string> warnings;       // populated via logWarning()
    };

    // Returns a copy of the result of the most recent migration attempt.
    MigrationResult getLastMigrationResult() const { return lastResult_; }

    // Individual migration components (for testing and granular control)
    bool migrateServerConfiguration();
    bool migratePackageMetadata();
    bool createPackagesDirectory();
    bool validateMigration() const;

    // Rollback capabilities
    bool canRollback() const;
    bool performRollback();

    // Backup and restore
    bool createBackup();
    bool restoreFromBackup();

private:
    std::filesystem::path configDir_;        // ~/.config/getpkg
    std::filesystem::path packagesDir_;      // configDir_/packages
    std::filesystem::path backupDir_;        // configDir_/migration_backup
    std::filesystem::path legacyTokenDir_;   // location of the old write_token.txt

    std::unique_ptr<PackageMetadataManager> packageManager_;
    std::unique_ptr<ServerManager> serverManager_;

    // mutable: const logging/validation methods record into the result.
    mutable MigrationResult lastResult_;

    // Migration detection helpers
    bool hasLegacyServerConfiguration() const;
    bool hasLegacyPackageFiles() const;
    bool hasNewFormatConfiguration() const;

    // Migration implementation helpers
    bool migrateLegacyTokenFile();
    bool migrateLegacyPackageFile(const std::filesystem::path& legacyPath);
    bool movePackageFilesToSubdirectory();
    bool updatePackageMetadataFormat();
    bool cleanupLegacyFiles();

    // Backup and rollback helpers
    bool backupLegacyConfiguration();
    bool backupExistingConfiguration();
    std::string generateBackupTimestamp() const;

    // Validation helpers
    bool validateServerConfiguration() const;
    bool validatePackageMetadata() const;
    bool validateDirectoryStructure() const;

    // Error handling and logging
    void logError(const std::string& message) const;
    void logWarning(const std::string& message) const;
    void logInfo(const std::string& message) const;

    // File system utilities
    bool safeFileMove(const std::filesystem::path& source, const std::filesystem::path& destination);
    bool safeFileCopy(const std::filesystem::path& source, const std::filesystem::path& destination);
    bool safeDirectoryCreate(const std::filesystem::path& directory);
    std::vector<std::filesystem::path> findFilesWithExtension(const std::filesystem::path& directory, const std::string& extension) const;

    // Constants
    static constexpr const char* LEGACY_TOKEN_FILENAME = "write_token.txt";
    static constexpr const char* SERVERS_CONFIG_FILENAME = "servers.json";
    static constexpr const char* PACKAGES_DIRECTORY_NAME = "packages";
    static constexpr const char* BACKUP_DIRECTORY_NAME = "migration_backup";
    static constexpr const char* DEFAULT_SERVER_URL = "getpkg.xyz";
};
|
463
getpkg/src/PackageMetadata.cpp
Normal file
463
getpkg/src/PackageMetadata.cpp
Normal file
@ -0,0 +1,463 @@
|
||||
#include "PackageMetadata.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <regex>
|
||||
#include <cstdlib>
|
||||
|
||||
// PackageMetadata implementation
|
||||
|
||||
// Constructs fully-populated metadata. When installDate is empty (the
// default), the current UTC timestamp is recorded instead.
PackageMetadata::PackageMetadata(const std::string& name, const std::string& version,
                                 const std::string& hash, const std::string& arch,
                                 const std::string& sourceServer, const std::string& installDate)
    : name(name), version(version), hash(hash), arch(arch), sourceServer(sourceServer) {

    if (installDate.empty()) {
        this->installDate = getCurrentTimestamp();
    } else {
        this->installDate = installDate;
    }
}
||||
|
||||
// Serializes all fields to JSON. "lastUpdated" is stamped with the current
// time at serialization, not stored state.
json PackageMetadata::toJson() const {
    return json{
        {"name", name},
        {"version", version},
        {"hash", hash},
        {"arch", arch},
        {"sourceServer", sourceServer},
        {"installDate", installDate},
        {"lastUpdated", getCurrentTimestamp()},
    };
}
|
||||
|
||||
// Deserializes metadata from the new JSON format. Missing/mistyped required
// fields are left empty (caller should check isValid()); the multi-server
// fields fall back to defaults so pre-upgrade files still load.
PackageMetadata PackageMetadata::fromJson(const json& j) {
    PackageMetadata metadata;

    // Required fields
    if (j.contains("name") && j["name"].is_string()) {
        metadata.name = j["name"].get<std::string>();
    }
    if (j.contains("version") && j["version"].is_string()) {
        metadata.version = j["version"].get<std::string>();
    }
    if (j.contains("hash") && j["hash"].is_string()) {
        metadata.hash = j["hash"].get<std::string>();
    }
    if (j.contains("arch") && j["arch"].is_string()) {
        metadata.arch = j["arch"].get<std::string>();
    }

    // New fields with defaults
    if (j.contains("sourceServer") && j["sourceServer"].is_string()) {
        metadata.sourceServer = j["sourceServer"].get<std::string>();
    } else {
        metadata.sourceServer = "getpkg.xyz"; // Default fallback
    }

    if (j.contains("installDate") && j["installDate"].is_string()) {
        metadata.installDate = j["installDate"].get<std::string>();
    } else {
        metadata.installDate = metadata.getCurrentTimestamp();
    }

    return metadata;
}
|
||||
|
||||
// Deserializes metadata from the legacy single-server format, which only
// carried name/version/hash/arch. sourceServer is set to defaultServer and
// installDate to "now", since the legacy format tracked neither.
PackageMetadata PackageMetadata::fromLegacyJson(const json& j, const std::string& defaultServer) {
    PackageMetadata metadata;

    // Legacy format only has: name, version, hash, arch
    if (j.contains("name") && j["name"].is_string()) {
        metadata.name = j["name"].get<std::string>();
    }
    if (j.contains("version") && j["version"].is_string()) {
        metadata.version = j["version"].get<std::string>();
    }
    if (j.contains("hash") && j["hash"].is_string()) {
        metadata.hash = j["hash"].get<std::string>();
    }
    if (j.contains("arch") && j["arch"].is_string()) {
        metadata.arch = j["arch"].get<std::string>();
    }

    // Set defaults for new fields
    metadata.sourceServer = defaultServer;
    metadata.installDate = metadata.getCurrentTimestamp();

    return metadata;
}
|
||||
|
||||
bool PackageMetadata::isValid() const {
|
||||
return isValidName() && isValidVersion() && isValidHash() &&
|
||||
isValidArch() && isValidServerUrl() && isValidTimestamp();
|
||||
}
|
||||
|
||||
// Returns a human-readable description of the first failing validation
// check, or the empty string when the metadata is fully valid.
std::string PackageMetadata::getValidationError() const {
    if (!isValidName()) {
        return "Invalid package name: must be non-empty and contain only alphanumeric characters, hyphens, and underscores";
    }
    if (!isValidVersion()) {
        return "Invalid version: must be non-empty";
    }
    if (!isValidHash()) {
        return "Invalid hash: must be non-empty and contain only hexadecimal characters";
    }
    if (!isValidArch()) {
        return "Invalid architecture: must be non-empty";
    }
    if (!isValidServerUrl()) {
        return "Invalid source server: must be non-empty and contain valid characters";
    }
    if (!isValidTimestamp()) {
        return "Invalid install date: must be non-empty";
    }
    return "";
}
|
||||
|
||||
bool PackageMetadata::saveToFile(const std::filesystem::path& filePath) const {
|
||||
if (!isValid()) {
|
||||
std::cerr << "Cannot save invalid package metadata: " << getValidationError() << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
// Ensure parent directory exists
|
||||
std::filesystem::create_directories(filePath.parent_path());
|
||||
|
||||
std::ofstream file(filePath);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open file for writing: " << filePath << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
file << toJson().dump(2);
|
||||
file.close();
|
||||
return true;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error saving package metadata to " << filePath << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Loads metadata from a JSON file. On any failure (missing file, open
// error, parse error) a default-constructed — and therefore invalid —
// PackageMetadata is returned; callers should check isValid().
PackageMetadata PackageMetadata::loadFromFile(const std::filesystem::path& filePath) {
    PackageMetadata metadata;

    try {
        if (!std::filesystem::exists(filePath)) {
            std::cerr << "Package metadata file does not exist: " << filePath << std::endl;
            return metadata;
        }

        std::ifstream file(filePath);
        if (!file.is_open()) {
            std::cerr << "Failed to open file for reading: " << filePath << std::endl;
            return metadata;
        }

        json j;
        file >> j;
        file.close();

        metadata = fromJson(j);

        // Warn but still return the parsed object so the caller can inspect it.
        if (!metadata.isValid()) {
            std::cerr << "Loaded package metadata is invalid: " << metadata.getValidationError() << std::endl;
        }

    } catch (const std::exception& e) {
        std::cerr << "Error loading package metadata from " << filePath << ": " << e.what() << std::endl;
    }

    return metadata;
}
|
||||
|
||||
// Returns the current UTC time in ISO-8601 format (YYYY-MM-DDTHH:MM:SSZ).
// NOTE: std::gmtime uses shared static storage and is not thread-safe.
std::string PackageMetadata::getCurrentTimestamp() const {
    auto now = std::chrono::system_clock::now();
    auto time_t = std::chrono::system_clock::to_time_t(now);

    std::stringstream ss;
    ss << std::put_time(std::gmtime(&time_t), "%Y-%m-%dT%H:%M:%SZ");
    return ss.str();
}
|
||||
|
||||
// True when the locally recorded hash differs from the server's hash,
// i.e. a newer (or at least different) build is available.
bool PackageMetadata::needsUpdate(const std::string& remoteHash) const {
    return hash != remoteHash;
}
|
||||
|
||||
// Private validation methods
|
||||
bool PackageMetadata::isValidName() const {
|
||||
if (name.empty()) return false;
|
||||
|
||||
// Package name should contain only alphanumeric characters, hyphens, and underscores
|
||||
std::regex namePattern("^[a-zA-Z0-9_-]+$");
|
||||
return std::regex_match(name, namePattern);
|
||||
}
|
||||
|
||||
// Any non-empty string is accepted as a version; no format is enforced.
bool PackageMetadata::isValidVersion() const {
    return !version.empty();
}
|
||||
|
||||
bool PackageMetadata::isValidHash() const {
|
||||
if (hash.empty()) return false;
|
||||
|
||||
// Hash should contain only hexadecimal characters
|
||||
std::regex hashPattern("^[a-fA-F0-9]+$");
|
||||
return std::regex_match(hash, hashPattern);
|
||||
}
|
||||
|
||||
// Any non-empty string is accepted as an architecture identifier.
bool PackageMetadata::isValidArch() const {
    return !arch.empty();
}
|
||||
|
||||
bool PackageMetadata::isValidServerUrl() const {
|
||||
if (sourceServer.empty()) return false;
|
||||
|
||||
// Basic server URL validation - should not contain invalid characters
|
||||
std::regex serverPattern("^[a-zA-Z0-9._-]+$");
|
||||
return std::regex_match(sourceServer, serverPattern);
|
||||
}
|
||||
|
||||
// Any non-empty string is accepted; the timestamp format is not parsed.
bool PackageMetadata::isValidTimestamp() const {
    return !installDate.empty();
}
|
||||
|
||||
// PackageMetadataManager implementation
|
||||
|
||||
// Default constructor: derives paths from $HOME (~/.config/getpkg and its
// packages/ subdirectory). If HOME is unset both paths stay empty —
// subsequent filesystem operations will then fail.
PackageMetadataManager::PackageMetadataManager() {
    const char* home = std::getenv("HOME");
    if (home) {
        configDir_ = std::filesystem::path(home) / ".config" / "getpkg";
        packagesDir_ = configDir_ / "packages";
    }
}
|
||||
|
||||
// Constructor for an explicit config directory (used by tests/migration);
// the packages directory is always configDir/packages.
PackageMetadataManager::PackageMetadataManager(const std::filesystem::path& configDir)
    : configDir_(configDir), packagesDir_(configDir / "packages") {
}
|
||||
|
||||
bool PackageMetadataManager::ensurePackagesDirectory() {
|
||||
try {
|
||||
if (!std::filesystem::exists(packagesDir_)) {
|
||||
std::filesystem::create_directories(packagesDir_);
|
||||
}
|
||||
return std::filesystem::is_directory(packagesDir_);
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error creating packages directory: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Accessor for the packages directory path (not guaranteed to exist).
std::filesystem::path PackageMetadataManager::getPackagesDirectory() const {
    return packagesDir_;
}
|
||||
|
||||
// Maps a tool name to its metadata file: <packagesDir>/<toolName>.json.
std::filesystem::path PackageMetadataManager::getPackageFilePath(const std::string& toolName) const {
    return packagesDir_ / (toolName + ".json");
}
|
||||
|
||||
// Persists metadata to <packagesDir>/<name>.json, creating the packages
// directory if needed. Returns false when the directory cannot be created
// or the metadata fails to save (e.g. it is invalid).
bool PackageMetadataManager::savePackageMetadata(const PackageMetadata& metadata) {
    if (!ensurePackagesDirectory()) {
        return false;
    }

    std::filesystem::path filePath = getPackageFilePath(metadata.name);
    return metadata.saveToFile(filePath);
}
|
||||
|
||||
// Loads metadata for a tool; returns an invalid (default) PackageMetadata
// when the file is missing or unreadable.
PackageMetadata PackageMetadataManager::loadPackageMetadata(const std::string& toolName) {
    return PackageMetadata::loadFromFile(getPackageFilePath(toolName));
}
|
||||
|
||||
bool PackageMetadataManager::packageExists(const std::string& toolName) const {
|
||||
std::filesystem::path filePath = getPackageFilePath(toolName);
|
||||
return std::filesystem::exists(filePath);
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::removePackageMetadata(const std::string& toolName) {
|
||||
try {
|
||||
std::filesystem::path filePath = getPackageFilePath(toolName);
|
||||
if (std::filesystem::exists(filePath)) {
|
||||
return std::filesystem::remove(filePath);
|
||||
}
|
||||
return true; // Already doesn't exist
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error removing package metadata for " << toolName << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::migrateFromLegacyFormat() {
|
||||
try {
|
||||
std::vector<std::string> legacyFiles = findLegacyPackageFiles();
|
||||
|
||||
if (legacyFiles.empty()) {
|
||||
return true; // Nothing to migrate
|
||||
}
|
||||
|
||||
if (!ensurePackagesDirectory()) {
|
||||
std::cerr << "Failed to create packages directory for migration" << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
int successCount = 0;
|
||||
for (const std::string& fileName : legacyFiles) {
|
||||
std::filesystem::path legacyPath = configDir_ / fileName;
|
||||
if (migrateLegacyPackageFile(legacyPath)) {
|
||||
successCount++;
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << "Migrated " << successCount << " of " << legacyFiles.size() << " legacy package files" << std::endl;
|
||||
return successCount == legacyFiles.size();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error during migration: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Lists filenames of legacy package metadata files: every *.json directly
// in the config directory except servers.json. The scan is non-recursive,
// so files already inside packages/ are never visited.
std::vector<std::string> PackageMetadataManager::findLegacyPackageFiles() const {
    std::vector<std::string> legacyFiles;

    try {
        if (!std::filesystem::exists(configDir_)) {
            return legacyFiles;
        }

        for (const auto& entry : std::filesystem::directory_iterator(configDir_)) {
            if (entry.is_regular_file() && entry.path().extension() == ".json") {
                std::string fileName = entry.path().filename().string();

                // servers.json is the new-format config, not package metadata.
                if (fileName != "servers.json") {
                    legacyFiles.push_back(fileName);
                }
            }
        }
    } catch (const std::exception& e) {
        std::cerr << "Error finding legacy package files: " << e.what() << std::endl;
    }

    return legacyFiles;
}
|
||||
|
||||
// Converts one legacy metadata file to the new format, saves it under
// packages/, and deletes the legacy file. Unlike the MigrationManager
// variant, this version removes the original outright rather than backing
// it up. Returns false on any validation or I/O failure.
bool PackageMetadataManager::migrateLegacyPackageFile(const std::filesystem::path& legacyPath, const std::string& defaultServer) {
    try {
        if (!std::filesystem::exists(legacyPath)) {
            return false;
        }

        // Load legacy format
        std::ifstream file(legacyPath);
        if (!file.is_open()) {
            std::cerr << "Failed to open legacy file: " << legacyPath << std::endl;
            return false;
        }

        json legacyJson;
        file >> legacyJson;
        file.close();

        // Convert to new format
        PackageMetadata metadata = PackageMetadata::fromLegacyJson(legacyJson, defaultServer);

        if (!metadata.isValid()) {
            std::cerr << "Invalid metadata after migration from " << legacyPath << ": " << metadata.getValidationError() << std::endl;
            return false;
        }

        // Save in new location
        if (!savePackageMetadata(metadata)) {
            std::cerr << "Failed to save migrated metadata for " << metadata.name << std::endl;
            return false;
        }

        // Remove legacy file
        std::filesystem::remove(legacyPath);

        std::cout << "Migrated package metadata: " << metadata.name << " from " << defaultServer << std::endl;
        return true;

    } catch (const std::exception& e) {
        // Also covers JSON parse errors thrown by operator>>.
        std::cerr << "Error migrating legacy file " << legacyPath << ": " << e.what() << std::endl;
        return false;
    }
}
|
||||
|
||||
std::vector<std::string> PackageMetadataManager::listInstalledPackages() const {
|
||||
std::vector<std::string> packages;
|
||||
|
||||
try {
|
||||
if (!std::filesystem::exists(packagesDir_)) {
|
||||
return packages;
|
||||
}
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(packagesDir_)) {
|
||||
if (entry.is_regular_file() && entry.path().extension() == ".json") {
|
||||
std::string toolName = entry.path().stem().string();
|
||||
packages.push_back(toolName);
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error listing installed packages: " << e.what() << std::endl;
|
||||
}
|
||||
|
||||
return packages;
|
||||
}
|
||||
|
||||
std::vector<PackageMetadata> PackageMetadataManager::getAllPackageMetadata() const {
|
||||
std::vector<PackageMetadata> allMetadata;
|
||||
|
||||
std::vector<std::string> packages = listInstalledPackages();
|
||||
for (const std::string& packageName : packages) {
|
||||
PackageMetadata metadata = const_cast<PackageMetadataManager*>(this)->loadPackageMetadata(packageName);
|
||||
if (metadata.isValid()) {
|
||||
allMetadata.push_back(metadata);
|
||||
}
|
||||
}
|
||||
|
||||
return allMetadata;
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::validateAllPackageMetadata() const {
|
||||
std::vector<std::string> packages = listInstalledPackages();
|
||||
|
||||
for (const std::string& packageName : packages) {
|
||||
PackageMetadata metadata = const_cast<PackageMetadataManager*>(this)->loadPackageMetadata(packageName);
|
||||
if (!metadata.isValid()) {
|
||||
std::cerr << "Invalid metadata for package " << packageName << ": " << metadata.getValidationError() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Deletes every metadata file that fails validation and returns how many
// were removed. Invalid entries are logged before removal.
int PackageMetadataManager::cleanupInvalidMetadata() {
    int removedCount = 0;
    std::vector<std::string> packages = listInstalledPackages();

    for (const std::string& packageName : packages) {
        PackageMetadata metadata = loadPackageMetadata(packageName);
        if (!metadata.isValid()) {
            std::cerr << "Removing invalid metadata for package " << packageName << ": " << metadata.getValidationError() << std::endl;
            if (removePackageMetadata(packageName)) {
                removedCount++;
            }
        }
    }

    return removedCount;
}
|
||||
|
||||
// True when the path names an existing regular file with a .json extension.
bool PackageMetadataManager::isValidPackageFile(const std::filesystem::path& filePath) const {
    return filePath.extension() == ".json" && std::filesystem::is_regular_file(filePath);
}
|
||||
|
||||
// Derives the tool name from a metadata path: the filename minus extension.
std::string PackageMetadataManager::extractToolNameFromPath(const std::filesystem::path& filePath) const {
    return filePath.stem().string();
}
|
97
getpkg/src/PackageMetadata.hpp
Normal file
97
getpkg/src/PackageMetadata.hpp
Normal file
@ -0,0 +1,97 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <filesystem>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
/**
|
||||
* Enhanced package metadata structure with server source tracking
|
||||
* Supports both new multi-server format and legacy single-server migration
|
||||
*/
|
||||
struct PackageMetadata {
    std::string name;          // package/tool identifier: [a-zA-Z0-9_-]+
    std::string version;       // free-form, non-empty
    std::string hash;          // hex digest of the installed artifact
    std::string arch;          // target architecture, free-form non-empty
    std::string sourceServer;  // New field for server tracking
    std::string installDate;   // New field for installation tracking (ISO-8601 UTC)

    // Default constructor — produces an object that fails isValid().
    PackageMetadata() = default;

    // Constructor with all fields; empty installDate defaults to "now".
    PackageMetadata(const std::string& name, const std::string& version,
                    const std::string& hash, const std::string& arch,
                    const std::string& sourceServer, const std::string& installDate = "");

    // Serialization methods
    json toJson() const;
    static PackageMetadata fromJson(const json& j);

    // Migration support - convert from legacy format (no server/date fields)
    static PackageMetadata fromLegacyJson(const json& j, const std::string& defaultServer = "getpkg.xyz");

    // Validation; getValidationError() describes the first failing check.
    bool isValid() const;
    std::string getValidationError() const;

    // File operations; loadFromFile returns an invalid object on failure.
    bool saveToFile(const std::filesystem::path& filePath) const;
    static PackageMetadata loadFromFile(const std::filesystem::path& filePath);

    // Utility methods
    std::string getCurrentTimestamp() const;
    bool needsUpdate(const std::string& remoteHash) const;

private:
    // Internal validation helpers, one per field.
    bool isValidName() const;
    bool isValidVersion() const;
    bool isValidHash() const;
    bool isValidArch() const;
    bool isValidServerUrl() const;
    bool isValidTimestamp() const;
};
|
||||
|
||||
/**
|
||||
* Package metadata manager for handling the packages directory structure
|
||||
*/
|
||||
class PackageMetadataManager {
public:
    // Default constructor derives paths from $HOME.
    PackageMetadataManager();
    explicit PackageMetadataManager(const std::filesystem::path& configDir);

    // Directory management
    bool ensurePackagesDirectory();
    std::filesystem::path getPackagesDirectory() const;
    std::filesystem::path getPackageFilePath(const std::string& toolName) const;

    // Package operations (metadata files live in <configDir>/packages)
    bool savePackageMetadata(const PackageMetadata& metadata);
    PackageMetadata loadPackageMetadata(const std::string& toolName);
    bool packageExists(const std::string& toolName) const;
    bool removePackageMetadata(const std::string& toolName);

    // Migration support: legacy files are *.json directly in configDir
    bool migrateFromLegacyFormat();
    std::vector<std::string> findLegacyPackageFiles() const;
    bool migrateLegacyPackageFile(const std::filesystem::path& legacyPath, const std::string& defaultServer = "getpkg.xyz");

    // Listing and enumeration
    std::vector<std::string> listInstalledPackages() const;
    std::vector<PackageMetadata> getAllPackageMetadata() const;

    // Validation and cleanup
    bool validateAllPackageMetadata() const;
    int cleanupInvalidMetadata();

private:
    std::filesystem::path configDir_;    // e.g. ~/.config/getpkg
    std::filesystem::path packagesDir_;  // configDir_/packages

    // Helper methods
    bool isValidPackageFile(const std::filesystem::path& filePath) const;
    std::string extractToolNameFromPath(const std::filesystem::path& filePath) const;
};
|
353
getpkg/src/ServerManager.cpp
Normal file
353
getpkg/src/ServerManager.cpp
Normal file
@ -0,0 +1,353 @@
|
||||
#include "ServerManager.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <regex>
|
||||
#include <cpr/cpr.h>
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
// ServerConfig implementation
|
||||
// Serializes this server entry; field names match servers.json's schema.
json ServerConfig::toJson() const {
    json j;
    j["url"] = url;
    j["name"] = name;
    j["default"] = isDefault;
    j["writeToken"] = writeToken;
    j["added"] = addedDate;
    return j;
}
|
||||
|
||||
// Deserializes a server entry; missing fields fall back to empty/false so
// partially-written configs still load.
ServerConfig ServerConfig::fromJson(const json& j) {
    ServerConfig parsed;
    parsed.url        = j.value("url", "");
    parsed.name       = j.value("name", "");
    parsed.isDefault  = j.value("default", false);
    parsed.writeToken = j.value("writeToken", "");
    parsed.addedDate  = j.value("added", "");
    return parsed;
}
|
||||
|
||||
// ServerManager implementation
|
||||
// Locate the configuration file at $HOME/.config/getpkg/servers.json.
// NOTE(review): if HOME is unset, configPath_ remains empty and later
// load/save calls operate on an empty path — confirm callers tolerate
// this (e.g. in minimal container environments).
ServerManager::ServerManager() {
    if (const char* homeDir = getenv("HOME")) {
        configPath_ = std::filesystem::path(homeDir) / ".config" / "getpkg" / "servers.json";
    }
}
|
||||
|
||||
bool ServerManager::addServer(const std::string& serverUrl, const std::string& writeToken) {
|
||||
if (!validateServerUrl(serverUrl)) {
|
||||
std::cerr << "Invalid server URL: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if server already exists
|
||||
if (findServer(serverUrl) != nullptr) {
|
||||
std::cerr << "Server already exists: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if server is reachable
|
||||
if (!isServerReachable(serverUrl)) {
|
||||
std::cerr << "Warning: Server may not be reachable: " << serverUrl << std::endl;
|
||||
// Continue anyway - server might be temporarily down
|
||||
}
|
||||
|
||||
ServerConfig config;
|
||||
config.url = serverUrl;
|
||||
config.name = serverUrl; // Use URL as default name
|
||||
config.isDefault = servers_.empty(); // First server becomes default
|
||||
config.writeToken = writeToken;
|
||||
config.addedDate = getCurrentTimestamp();
|
||||
|
||||
servers_.push_back(config);
|
||||
|
||||
return saveConfiguration();
|
||||
}
|
||||
|
||||
bool ServerManager::removeServer(const std::string& serverUrl) {
|
||||
auto it = std::find_if(servers_.begin(), servers_.end(),
|
||||
[&serverUrl](const ServerConfig& config) {
|
||||
return config.url == serverUrl;
|
||||
});
|
||||
|
||||
if (it == servers_.end()) {
|
||||
std::cerr << "Server not found: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't allow removing the last server
|
||||
if (servers_.size() == 1) {
|
||||
std::cerr << "Cannot remove the last server. Add another server first." << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool wasDefault = it->isDefault;
|
||||
servers_.erase(it);
|
||||
|
||||
// If we removed the default server, make the first remaining server default
|
||||
if (wasDefault && !servers_.empty()) {
|
||||
servers_[0].isDefault = true;
|
||||
}
|
||||
|
||||
return saveConfiguration();
|
||||
}
|
||||
|
||||
std::vector<std::string> ServerManager::getServers() const {
|
||||
std::vector<std::string> urls;
|
||||
for (const auto& server : servers_) {
|
||||
urls.push_back(server.url);
|
||||
}
|
||||
return urls;
|
||||
}
|
||||
|
||||
std::string ServerManager::getDefaultServer() const {
|
||||
for (const auto& server : servers_) {
|
||||
if (server.isDefault) {
|
||||
return server.url;
|
||||
}
|
||||
}
|
||||
|
||||
// If no default is set, return the first server
|
||||
if (!servers_.empty()) {
|
||||
return servers_[0].url;
|
||||
}
|
||||
|
||||
return "getpkg.xyz"; // Fallback to original default
|
||||
}
|
||||
|
||||
std::string ServerManager::getDefaultPublishServer() const {
|
||||
// Return first server with a write token
|
||||
for (const auto& server : servers_) {
|
||||
if (!server.writeToken.empty()) {
|
||||
return server.url;
|
||||
}
|
||||
}
|
||||
|
||||
// If no server has a token, return the default server
|
||||
return getDefaultServer();
|
||||
}
|
||||
|
||||
bool ServerManager::setWriteToken(const std::string& serverUrl, const std::string& token) {
|
||||
ServerConfig* server = findServer(serverUrl);
|
||||
if (server == nullptr) {
|
||||
std::cerr << "Server not found: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
server->writeToken = token;
|
||||
return saveConfiguration();
|
||||
}
|
||||
|
||||
std::string ServerManager::getWriteToken(const std::string& serverUrl) const {
|
||||
const ServerConfig* server = findServer(serverUrl);
|
||||
if (server != nullptr) {
|
||||
return server->writeToken;
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
bool ServerManager::hasWriteToken(const std::string& serverUrl) const {
|
||||
const ServerConfig* server = findServer(serverUrl);
|
||||
return server != nullptr && !server->writeToken.empty();
|
||||
}
|
||||
|
||||
std::vector<std::string> ServerManager::getServersWithTokens() const {
|
||||
std::vector<std::string> serversWithTokens;
|
||||
for (const auto& server : servers_) {
|
||||
if (!server.writeToken.empty()) {
|
||||
serversWithTokens.push_back(server.url);
|
||||
}
|
||||
}
|
||||
return serversWithTokens;
|
||||
}
|
||||
|
||||
bool ServerManager::loadConfiguration() {
|
||||
if (!std::filesystem::exists(configPath_)) {
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
|
||||
try {
|
||||
std::ifstream file(configPath_);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open server configuration file: " << configPath_ << std::endl;
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
|
||||
json config;
|
||||
file >> config;
|
||||
|
||||
if (!config.contains("servers") || !config["servers"].is_array()) {
|
||||
std::cerr << "Invalid server configuration format" << std::endl;
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
|
||||
servers_.clear();
|
||||
for (const auto& serverJson : config["servers"]) {
|
||||
try {
|
||||
servers_.push_back(ServerConfig::fromJson(serverJson));
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Warning: Skipping invalid server config: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure we have at least one server
|
||||
if (servers_.empty()) {
|
||||
ensureDefaultConfiguration();
|
||||
}
|
||||
|
||||
return true;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error loading server configuration: " << e.what() << std::endl;
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool ServerManager::saveConfiguration() {
|
||||
try {
|
||||
// Ensure directory exists
|
||||
std::filesystem::create_directories(configPath_.parent_path());
|
||||
|
||||
json config;
|
||||
config["version"] = "1.0";
|
||||
config["lastUpdated"] = getCurrentTimestamp();
|
||||
|
||||
json serversArray = json::array();
|
||||
for (const auto& server : servers_) {
|
||||
serversArray.push_back(server.toJson());
|
||||
}
|
||||
config["servers"] = serversArray;
|
||||
|
||||
std::ofstream file(configPath_);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open server configuration file for writing: " << configPath_ << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
file << config.dump(2);
|
||||
return file.good();
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error saving server configuration: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
void ServerManager::ensureDefaultConfiguration() {
|
||||
servers_.clear();
|
||||
|
||||
ServerConfig defaultServer;
|
||||
defaultServer.url = "getpkg.xyz";
|
||||
defaultServer.name = "Official getpkg Registry";
|
||||
defaultServer.isDefault = true;
|
||||
defaultServer.writeToken = "";
|
||||
defaultServer.addedDate = getCurrentTimestamp();
|
||||
|
||||
servers_.push_back(defaultServer);
|
||||
|
||||
saveConfiguration();
|
||||
}
|
||||
|
||||
bool ServerManager::migrateFromLegacy() {
|
||||
const char* home = getenv("HOME");
|
||||
if (!home) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::filesystem::path legacyTokenPath = std::filesystem::path(home) / ".config" / "getpkg.xyz" / "write_token.txt";
|
||||
|
||||
if (std::filesystem::exists(legacyTokenPath)) {
|
||||
try {
|
||||
std::ifstream tokenFile(legacyTokenPath);
|
||||
std::string token;
|
||||
std::getline(tokenFile, token);
|
||||
|
||||
if (!token.empty()) {
|
||||
// Set the token for getpkg.xyz server
|
||||
setWriteToken("getpkg.xyz", token);
|
||||
|
||||
// Optionally remove the legacy token file
|
||||
// std::filesystem::remove(legacyTokenPath);
|
||||
|
||||
std::cout << "Migrated legacy write token for getpkg.xyz" << std::endl;
|
||||
return true;
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Warning: Failed to migrate legacy token: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate that a server "URL" is a plausible bare hostname or IP
// (e.g. example.com, sub.example.com, 192.168.1.1, localhost).
// Enforces the 253-character DNS name limit, an alphanumeric first and
// last character, only [A-Za-z0-9.-] in between, and no "..".
bool ServerManager::validateServerUrl(const std::string& url) const {
    // DNS name length limit.
    if (url.empty() || url.length() > 253) {
        return false;
    }

    static const std::regex hostPattern(R"(^[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$)");
    if (!std::regex_match(url, hostPattern)) {
        return false;
    }

    // The regex still admits consecutive dots; reject those explicitly.
    if (url.find("..") != std::string::npos) {
        return false;
    }

    // Defensive: leading/trailing dot (already excluded by the regex).
    if (url.front() == '.' || url.back() == '.') {
        return false;
    }

    return true;
}
|
||||
|
||||
bool ServerManager::isServerReachable(const std::string& url) const {
|
||||
try {
|
||||
std::string testUrl = "https://" + url + "/";
|
||||
|
||||
auto response = cpr::Head(cpr::Url{testUrl},
|
||||
cpr::Timeout{5000}, // 5 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
// Accept any response that indicates the server is reachable
|
||||
// (200, 404, 403, etc. - as long as we get a response)
|
||||
return response.status_code > 0;
|
||||
} catch (const std::exception& e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Mutable lookup of a configured server by URL; nullptr when absent.
// Delegates to the const overload; the const_cast is safe because *this
// is non-const here.
ServerConfig* ServerManager::findServer(const std::string& url) {
    const ServerManager* constSelf = this;
    return const_cast<ServerConfig*>(constSelf->findServer(url));
}
|
||||
|
||||
// Read-only lookup of a configured server by URL; nullptr when absent.
const ServerConfig* ServerManager::findServer(const std::string& url) const {
    for (const auto& entry : servers_) {
        if (entry.url == url) {
            return &entry;
        }
    }
    return nullptr;
}
|
||||
|
||||
// Current UTC time formatted as ISO-8601, e.g. "2024-01-01T00:00:00Z".
// Used for the "added" and "lastUpdated" fields in servers.json.
// NOTE(review): std::gmtime returns a pointer to shared static storage
// and is not thread-safe — acceptable while ServerManager is used from a
// single thread; switch to gmtime_r if that changes.
std::string ServerManager::getCurrentTimestamp() const {
    auto now = std::chrono::system_clock::now();
    // FIX: the local was previously named "time_t", shadowing the standard
    // type std::time_t.
    std::time_t nowSeconds = std::chrono::system_clock::to_time_t(now);

    std::stringstream ss;
    ss << std::put_time(std::gmtime(&nowSeconds), "%Y-%m-%dT%H:%M:%SZ");
    return ss.str();
}
|
53
getpkg/src/ServerManager.hpp
Normal file
53
getpkg/src/ServerManager.hpp
Normal file
@ -0,0 +1,53 @@
|
||||
#pragma once
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <filesystem>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
struct ServerConfig {
|
||||
std::string url;
|
||||
std::string name;
|
||||
bool isDefault = false;
|
||||
std::string writeToken;
|
||||
std::string addedDate;
|
||||
|
||||
// JSON serialization
|
||||
nlohmann::json toJson() const;
|
||||
static ServerConfig fromJson(const nlohmann::json& j);
|
||||
};
|
||||
|
||||
class ServerManager {
|
||||
public:
|
||||
ServerManager();
|
||||
|
||||
// Server management
|
||||
bool addServer(const std::string& serverUrl, const std::string& writeToken = "");
|
||||
bool removeServer(const std::string& serverUrl);
|
||||
std::vector<std::string> getServers() const;
|
||||
std::string getDefaultServer() const;
|
||||
std::string getDefaultPublishServer() const; // First server with write token
|
||||
|
||||
// Token management
|
||||
bool setWriteToken(const std::string& serverUrl, const std::string& token);
|
||||
std::string getWriteToken(const std::string& serverUrl) const;
|
||||
bool hasWriteToken(const std::string& serverUrl) const;
|
||||
std::vector<std::string> getServersWithTokens() const;
|
||||
|
||||
// Configuration
|
||||
bool loadConfiguration();
|
||||
bool saveConfiguration();
|
||||
void ensureDefaultConfiguration();
|
||||
|
||||
// Migration
|
||||
bool migrateFromLegacy();
|
||||
|
||||
private:
|
||||
std::vector<ServerConfig> servers_;
|
||||
std::filesystem::path configPath_;
|
||||
|
||||
bool validateServerUrl(const std::string& url) const;
|
||||
bool isServerReachable(const std::string& url) const;
|
||||
ServerConfig* findServer(const std::string& url);
|
||||
const ServerConfig* findServer(const std::string& url) const;
|
||||
std::string getCurrentTimestamp() const;
|
||||
};
|
@ -57,6 +57,9 @@
|
||||
#include "BashrcEditor.hpp"
|
||||
#include "DropshellScriptManager.hpp"
|
||||
#include "GetbinClient.hpp"
|
||||
#include "MigrationManager.hpp"
|
||||
#include "ServerManager.hpp"
|
||||
#include "PackageMetadata.hpp"
|
||||
#include "archive_tgz.hpp"
|
||||
#include "hash.hpp"
|
||||
#include <iostream>
|
||||
@ -163,25 +166,47 @@ int install_tool(int argc, char* argv[]) {
|
||||
std::filesystem::path configDir = std::filesystem::path(home) / ".config/getpkg";
|
||||
std::filesystem::path binDir = std::filesystem::path(home) / ".getpkg" / toolName;
|
||||
std::filesystem::path archivePath = tempDir.path() / (toolName + ".tgz");
|
||||
std::filesystem::path toolInfoPath = configDir / (toolName + ".json");
|
||||
|
||||
// Initialize ServerManager and get server list
|
||||
ServerManager serverManager;
|
||||
if (!serverManager.loadConfiguration()) {
|
||||
std::cerr << "Failed to load server configuration" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
if (servers.empty()) {
|
||||
std::cerr << "No servers configured" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Initialize PackageMetadataManager
|
||||
PackageMetadataManager packageManager(configDir);
|
||||
if (!packageManager.ensurePackagesDirectory()) {
|
||||
std::cerr << "Failed to create packages directory" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Check if tool needs update or install
|
||||
if (std::filesystem::exists(toolInfoPath)) {
|
||||
bool isUpdate = false;
|
||||
PackageMetadata existingMetadata;
|
||||
if (packageManager.packageExists(toolName)) {
|
||||
// Tool exists, check if update needed
|
||||
std::ifstream tfile(toolInfoPath);
|
||||
json toolInfo;
|
||||
tfile >> toolInfo;
|
||||
tfile.close();
|
||||
existingMetadata = packageManager.loadPackageMetadata(toolName);
|
||||
if (!existingMetadata.isValid()) {
|
||||
std::cerr << "Warning: Invalid existing package metadata for " << toolName << std::endl;
|
||||
}
|
||||
|
||||
std::string localHash = toolInfo.value("hash", "");
|
||||
std::string localArch = toolInfo.value("arch", arch);
|
||||
std::string localHash = existingMetadata.hash;
|
||||
std::string localArch = existingMetadata.arch.empty() ? arch : existingMetadata.arch;
|
||||
|
||||
// Get remote hash to compare - use the same arch that was originally installed
|
||||
GetbinClient getbin;
|
||||
// Get remote hash to compare - use multi-server GetbinClient
|
||||
GetbinClient getbin(servers);
|
||||
std::string remoteHash;
|
||||
if (getbin.getHash(toolName, localArch, remoteHash) && !remoteHash.empty()) {
|
||||
if (localHash != remoteHash) {
|
||||
std::cout << "Updating " << toolName << "..." << std::endl;
|
||||
isUpdate = true;
|
||||
} else {
|
||||
std::cout << toolName << " is already up to date." << std::endl;
|
||||
return 0;
|
||||
@ -189,6 +214,7 @@ int install_tool(int argc, char* argv[]) {
|
||||
} else {
|
||||
// If we can't get remote hash, assume update is needed
|
||||
std::cout << "Updating " << toolName << "..." << std::endl;
|
||||
isUpdate = true;
|
||||
}
|
||||
} else {
|
||||
std::cout << "Installing " << toolName << "..." << std::endl;
|
||||
@ -208,9 +234,10 @@ int install_tool(int argc, char* argv[]) {
|
||||
if (std::filesystem::exists(binDir))
|
||||
std::filesystem::remove_all(binDir);
|
||||
|
||||
// Download tool - try arch-specific version first, then universal fallback
|
||||
GetbinClient getbin2;
|
||||
// Download tool using multi-server GetbinClient - try arch-specific version first, then universal fallback
|
||||
GetbinClient getbin2(servers);
|
||||
std::string downloadArch = arch;
|
||||
std::string sourceServer;
|
||||
|
||||
// Progress callback for downloads
|
||||
auto progressCallback = [&toolName](size_t downloaded, size_t total) -> bool {
|
||||
@ -235,6 +262,12 @@ int install_tool(int argc, char* argv[]) {
|
||||
}
|
||||
clearAndPrint("Downloading " + toolName + "... done\n");
|
||||
|
||||
// Find which server provided the package
|
||||
if (!getbin2.findPackageServer(toolName, downloadArch, sourceServer)) {
|
||||
// Fallback to first server if we can't determine the source
|
||||
sourceServer = servers[0];
|
||||
}
|
||||
|
||||
// Unpack tool
|
||||
std::cout << "Unpacking..." << std::flush;
|
||||
if (!common::unpack_tgz(archivePath.string(), binDir.string())) {
|
||||
@ -270,16 +303,11 @@ int install_tool(int argc, char* argv[]) {
|
||||
std::cerr << "Warning: Failed to get version for " << toolName << std::endl;
|
||||
}
|
||||
|
||||
// Save tool info
|
||||
json toolInfo = {
|
||||
{"name", toolName},
|
||||
{"version", version},
|
||||
{"hash", hash},
|
||||
{"arch", downloadArch}
|
||||
};
|
||||
std::ofstream toolInfoFile(toolInfoPath);
|
||||
toolInfoFile << toolInfo.dump(2);
|
||||
toolInfoFile.close();
|
||||
// Create and save enhanced package metadata
|
||||
PackageMetadata metadata(toolName, version, hash, downloadArch, sourceServer);
|
||||
if (!packageManager.savePackageMetadata(metadata)) {
|
||||
std::cerr << "Warning: Failed to save package metadata for " << toolName << std::endl;
|
||||
}
|
||||
|
||||
// Run setup script if exists
|
||||
std::filesystem::path setupScriptPath = binDir / "setup_script.sh";
|
||||
@ -295,11 +323,27 @@ int install_tool(int argc, char* argv[]) {
|
||||
|
||||
int publish_tool(int argc, char* argv[]) {
|
||||
if (argc < 4) {
|
||||
std::cerr << "Usage: getpkg publish <tool_name:ARCH> <folder>" << std::endl;
|
||||
std::cerr << "Usage: getpkg publish [--server <url>] <tool_name:ARCH> <folder>" << std::endl;
|
||||
std::cerr << " getpkg publish <tool_name:ARCH> <folder>" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
std::string labeltag = argv[2];
|
||||
std::string folder = argv[3];
|
||||
|
||||
// Parse arguments for --server option
|
||||
std::string targetServer;
|
||||
std::string labeltag;
|
||||
std::string folder;
|
||||
int argIndex = 2;
|
||||
|
||||
if (argc >= 5 && std::string(argv[2]) == "--server") {
|
||||
targetServer = argv[3];
|
||||
labeltag = argv[4];
|
||||
folder = argv[5];
|
||||
argIndex = 5;
|
||||
} else {
|
||||
labeltag = argv[2];
|
||||
folder = argv[3];
|
||||
argIndex = 3;
|
||||
}
|
||||
|
||||
// If no ARCH is provided (no colon in labeltag), append ":universal" for cross-platform tools
|
||||
if (labeltag.find(':') == std::string::npos) {
|
||||
@ -314,6 +358,49 @@ int publish_tool(int argc, char* argv[]) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize ServerManager
|
||||
ServerManager serverManager;
|
||||
if (!serverManager.loadConfiguration()) {
|
||||
std::cerr << "Failed to load server configuration" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Determine target server
|
||||
std::string publishServer;
|
||||
if (!targetServer.empty()) {
|
||||
// User specified a server, validate it exists in configuration
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
if (std::find(servers.begin(), servers.end(), targetServer) == servers.end()) {
|
||||
std::cerr << "Error: Server '" << targetServer << "' is not configured" << std::endl;
|
||||
std::cerr << "Use 'getpkg server add " << targetServer << "' to add it first" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
publishServer = targetServer;
|
||||
} else {
|
||||
// Use default publish server (first server with write token)
|
||||
publishServer = serverManager.getDefaultPublishServer();
|
||||
if (publishServer.empty()) {
|
||||
std::cerr << "Error: No servers with write tokens configured" << std::endl;
|
||||
std::cerr << "Use 'getpkg server add <url>' and provide a write token" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Get write token for the target server
|
||||
std::string token = serverManager.getWriteToken(publishServer);
|
||||
if (token.empty()) {
|
||||
// Check environment variable as fallback
|
||||
const char* envToken = std::getenv("SOS_WRITE_TOKEN");
|
||||
if (envToken && std::strlen(envToken) > 0) {
|
||||
token = envToken;
|
||||
} else {
|
||||
std::cerr << "Error: No write token found for server '" << publishServer << "'" << std::endl;
|
||||
std::cerr << "Set SOS_WRITE_TOKEN environment variable or configure token for this server" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
std::string home = get_home();
|
||||
std::filesystem::path archivePath = std::filesystem::path(home) / ".tmp" / (labeltag + ".tgz");
|
||||
std::filesystem::create_directories(archivePath.parent_path());
|
||||
@ -322,24 +409,10 @@ int publish_tool(int argc, char* argv[]) {
|
||||
std::cerr << "Failed to create archive." << std::endl;
|
||||
return 1;
|
||||
}
|
||||
std::string token;
|
||||
const char* envToken = std::getenv("SOS_WRITE_TOKEN");
|
||||
if (envToken && std::strlen(envToken) > 0) {
|
||||
token = envToken;
|
||||
} else {
|
||||
std::filesystem::path tokenPath = std::filesystem::path(home) / ".config/getpkg.xyz/write_token.txt";
|
||||
if (std::filesystem::exists(tokenPath)) {
|
||||
std::ifstream tfile(tokenPath);
|
||||
std::getline(tfile, token);
|
||||
} else {
|
||||
std::cout << "Enter getpkg.xyz write token: ";
|
||||
std::getline(std::cin, token);
|
||||
std::filesystem::create_directories(tokenPath.parent_path());
|
||||
std::ofstream tfile(tokenPath);
|
||||
tfile << token << std::endl;
|
||||
}
|
||||
}
|
||||
GetbinClient getbin;
|
||||
|
||||
// Initialize GetbinClient with server list
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
GetbinClient getbin(servers);
|
||||
std::string url, hash;
|
||||
|
||||
// Progress callback for upload
|
||||
@ -353,13 +426,14 @@ int publish_tool(int argc, char* argv[]) {
|
||||
return true; // Continue upload
|
||||
};
|
||||
|
||||
std::cout << "Publishing to " << publishServer << "..." << std::endl;
|
||||
std::cout << "Uploading..." << std::flush;
|
||||
if (!getbin.upload(archivePath.string(), url, hash, token, uploadProgressCallback)) {
|
||||
std::cerr << "\rFailed to upload archive." << std::endl;
|
||||
if (!getbin.upload(publishServer, archivePath.string(), url, hash, token, uploadProgressCallback)) {
|
||||
std::cerr << "\rFailed to upload archive to " << publishServer << std::endl;
|
||||
return 1;
|
||||
}
|
||||
clearAndPrint("Uploading... done\n");
|
||||
std::cout << "Published! URL: " << url << "\nHash: " << hash << std::endl;
|
||||
std::cout << "Published to " << publishServer << "! URL: " << url << "\nHash: " << hash << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -367,6 +441,25 @@ int update_tool(int argc, char* argv[]) {
|
||||
std::string home = get_home();
|
||||
std::filesystem::path configDir = std::filesystem::path(home) / ".config/getpkg";
|
||||
|
||||
// Initialize ServerManager and PackageMetadataManager
|
||||
ServerManager serverManager;
|
||||
if (!serverManager.loadConfiguration()) {
|
||||
std::cerr << "Failed to load server configuration" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
if (servers.empty()) {
|
||||
std::cerr << "No servers configured" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
PackageMetadataManager packageManager(configDir);
|
||||
if (!packageManager.ensurePackagesDirectory()) {
|
||||
std::cerr << "Failed to create packages directory" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Structure to hold tool information
|
||||
struct ToolInfo {
|
||||
std::string name;
|
||||
@ -374,29 +467,43 @@ int update_tool(int argc, char* argv[]) {
|
||||
std::string remoteHash;
|
||||
std::string arch;
|
||||
std::string version;
|
||||
std::string sourceServer;
|
||||
bool needsUpdate = false;
|
||||
std::string status = "Up to date";
|
||||
};
|
||||
|
||||
std::vector<ToolInfo> tools;
|
||||
|
||||
// Collect all installed tools
|
||||
if (std::filesystem::exists(configDir)) {
|
||||
for (const auto& entry : std::filesystem::directory_iterator(configDir)) {
|
||||
if (entry.path().extension() == ".json") {
|
||||
std::string tname = entry.path().stem();
|
||||
|
||||
ToolInfo tool;
|
||||
tool.name = tname;
|
||||
|
||||
// Read local tool info
|
||||
std::ifstream tfile(entry.path());
|
||||
// Collect all installed tools using PackageMetadataManager
|
||||
std::vector<std::string> installedPackages = packageManager.listInstalledPackages();
|
||||
for (const std::string& toolName : installedPackages) {
|
||||
ToolInfo tool;
|
||||
tool.name = toolName;
|
||||
|
||||
// Load package metadata
|
||||
PackageMetadata metadata = packageManager.loadPackageMetadata(toolName);
|
||||
if (metadata.isValid()) {
|
||||
tool.localHash = metadata.hash;
|
||||
tool.arch = metadata.arch.empty() ? get_arch() : metadata.arch;
|
||||
tool.version = metadata.version;
|
||||
tool.sourceServer = metadata.sourceServer;
|
||||
|
||||
if (tool.version.empty() || tool.version == "-") {
|
||||
tool.version = "installed";
|
||||
}
|
||||
} else {
|
||||
// Fallback to legacy format if new format fails
|
||||
std::filesystem::path legacyPath = configDir / (toolName + ".json");
|
||||
if (std::filesystem::exists(legacyPath)) {
|
||||
std::ifstream tfile(legacyPath);
|
||||
if (tfile.good()) {
|
||||
json toolInfo;
|
||||
tfile >> toolInfo;
|
||||
tool.localHash = toolInfo.value("hash", "");
|
||||
tool.arch = toolInfo.value("arch", get_arch());
|
||||
tool.version = toolInfo.value("version", "-");
|
||||
tool.sourceServer = "getpkg.xyz"; // Default for legacy
|
||||
|
||||
if (!tool.version.empty() && tool.version.back() == '\n') {
|
||||
tool.version.pop_back();
|
||||
}
|
||||
@ -404,10 +511,10 @@ int update_tool(int argc, char* argv[]) {
|
||||
tool.version = "installed";
|
||||
}
|
||||
}
|
||||
|
||||
tools.push_back(tool);
|
||||
}
|
||||
}
|
||||
|
||||
tools.push_back(tool);
|
||||
}
|
||||
|
||||
if (tools.empty()) {
|
||||
@ -418,14 +525,14 @@ int update_tool(int argc, char* argv[]) {
|
||||
// Step 1: Check for updates (with progress)
|
||||
std::cout << "Checking " << tools.size() << " tools for updates..." << std::endl;
|
||||
|
||||
GetbinClient getbin;
|
||||
GetbinClient getbin(servers);
|
||||
for (size_t i = 0; i < tools.size(); ++i) {
|
||||
auto& tool = tools[i];
|
||||
|
||||
// Show progress
|
||||
std::cout << "\r[" << (i + 1) << "/" << tools.size() << "] Checking " << tool.name << "..." << std::flush;
|
||||
|
||||
// Check remote hash
|
||||
// Check remote hash - use multi-server fallback
|
||||
std::string remoteHash;
|
||||
if (getbin.getHash(tool.name, tool.arch, remoteHash) && !remoteHash.empty()) {
|
||||
tool.remoteHash = remoteHash;
|
||||
@ -497,16 +604,10 @@ int update_tool(int argc, char* argv[]) {
|
||||
tool.status = "Updated";
|
||||
clearAndPrint("Updated\n");
|
||||
|
||||
// Re-read version after update
|
||||
std::filesystem::path toolInfoPath = configDir / (tool.name + ".json");
|
||||
if (std::filesystem::exists(toolInfoPath)) {
|
||||
std::ifstream tfile(toolInfoPath);
|
||||
json toolInfo;
|
||||
tfile >> toolInfo;
|
||||
tool.version = toolInfo.value("version", tool.version);
|
||||
if (!tool.version.empty() && tool.version.back() == '\n') {
|
||||
tool.version.pop_back();
|
||||
}
|
||||
// Re-read version after update using PackageMetadataManager
|
||||
PackageMetadata updatedMetadata = packageManager.loadPackageMetadata(tool.name);
|
||||
if (updatedMetadata.isValid()) {
|
||||
tool.version = updatedMetadata.version;
|
||||
if (tool.version.empty() || tool.version == "-") {
|
||||
tool.version = "installed";
|
||||
}
|
||||
@ -620,38 +721,73 @@ int hash_command(int argc, char* argv[]) {
|
||||
|
||||
int unpublish_tool(int argc, char* argv[]) {
|
||||
if (argc < 3) {
|
||||
std::cerr << "Usage: getpkg unpublish <tool_name[:ARCH]>" << std::endl;
|
||||
std::cerr << "Usage: getpkg unpublish [--server <url>] <tool_name[:ARCH]>" << std::endl;
|
||||
std::cerr << " getpkg unpublish [--server <url>] <hash>" << std::endl;
|
||||
std::cerr << " getpkg unpublish <tool_name[:ARCH]>" << std::endl;
|
||||
std::cerr << " getpkg unpublish <hash>" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
std::string target = argv[2];
|
||||
|
||||
// Get token
|
||||
std::string token;
|
||||
const char* envToken = std::getenv("SOS_WRITE_TOKEN");
|
||||
if (envToken && std::strlen(envToken) > 0) {
|
||||
token = envToken;
|
||||
} else {
|
||||
std::string home = get_home();
|
||||
std::filesystem::path tokenPath = std::filesystem::path(home) / ".config/getpkg.xyz/write_token.txt";
|
||||
if (std::filesystem::exists(tokenPath)) {
|
||||
std::ifstream tfile(tokenPath);
|
||||
std::getline(tfile, token);
|
||||
} else {
|
||||
std::cout << "Enter getpkg.xyz write token: ";
|
||||
std::getline(std::cin, token);
|
||||
std::filesystem::create_directories(tokenPath.parent_path());
|
||||
std::ofstream tfile(tokenPath);
|
||||
tfile << token << std::endl;
|
||||
// Parse arguments for --server option
|
||||
std::string targetServer;
|
||||
std::string target;
|
||||
|
||||
if (argc >= 4 && std::string(argv[2]) == "--server") {
|
||||
if (argc < 5) {
|
||||
std::cerr << "Usage: getpkg unpublish --server <url> <tool_name[:ARCH]|hash>" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
targetServer = argv[3];
|
||||
target = argv[4];
|
||||
} else {
|
||||
target = argv[2];
|
||||
}
|
||||
|
||||
if (token.empty()) {
|
||||
std::cerr << "Error: No write token provided" << std::endl;
|
||||
// Initialize ServerManager
|
||||
ServerManager serverManager;
|
||||
if (!serverManager.loadConfiguration()) {
|
||||
std::cerr << "Failed to load server configuration" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
GetbinClient getbin;
|
||||
// Determine target server
|
||||
std::string unpublishServer;
|
||||
if (!targetServer.empty()) {
|
||||
// User specified a server, validate it exists in configuration
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
if (std::find(servers.begin(), servers.end(), targetServer) == servers.end()) {
|
||||
std::cerr << "Error: Server '" << targetServer << "' is not configured" << std::endl;
|
||||
std::cerr << "Use 'getpkg server add " << targetServer << "' to add it first" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
unpublishServer = targetServer;
|
||||
} else {
|
||||
// Use default publish server (first server with write token)
|
||||
unpublishServer = serverManager.getDefaultPublishServer();
|
||||
if (unpublishServer.empty()) {
|
||||
std::cerr << "Error: No servers with write tokens configured" << std::endl;
|
||||
std::cerr << "Use 'getpkg server add <url>' and provide a write token" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Get write token for the target server
|
||||
std::string token = serverManager.getWriteToken(unpublishServer);
|
||||
if (token.empty()) {
|
||||
// Check environment variable as fallback
|
||||
const char* envToken = std::getenv("SOS_WRITE_TOKEN");
|
||||
if (envToken && std::strlen(envToken) > 0) {
|
||||
token = envToken;
|
||||
} else {
|
||||
std::cerr << "Error: No write token found for server '" << unpublishServer << "'" << std::endl;
|
||||
std::cerr << "Set SOS_WRITE_TOKEN environment variable or configure token for this server" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize GetbinClient with server list
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
GetbinClient getbin(servers);
|
||||
std::string hash = target;
|
||||
|
||||
// Check if target looks like a hash (all digits) or a tool name
|
||||
@ -676,8 +812,8 @@ int unpublish_tool(int argc, char* argv[]) {
|
||||
|
||||
// If a specific architecture was requested, only unpublish that one
|
||||
if (!specificArch.empty()) {
|
||||
if (!getbin.getHash(toolName, specificArch, hash)) {
|
||||
std::cerr << "Failed to get hash for " << target << std::endl;
|
||||
if (!getbin.getHash(unpublishServer, toolName, specificArch, hash)) {
|
||||
std::cerr << "Failed to get hash for " << target << " on server " << unpublishServer << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -701,14 +837,14 @@ int unpublish_tool(int argc, char* argv[]) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::cout << "Found hash " << hash << " for " << target << std::endl;
|
||||
std::cout << "Found hash " << hash << " for " << target << " on " << unpublishServer << std::endl;
|
||||
|
||||
// Delete the specific architecture
|
||||
if (getbin.deleteObject(hash, token)) {
|
||||
std::cout << "Successfully unpublished " << target << " (hash: " << hash << ")" << std::endl;
|
||||
std::cout << "Successfully unpublished " << target << " from " << unpublishServer << " (hash: " << hash << ")" << std::endl;
|
||||
return 0;
|
||||
} else {
|
||||
std::cerr << "Failed to unpublish " << target << std::endl;
|
||||
std::cerr << "Failed to unpublish " << target << " from " << unpublishServer << std::endl;
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
@ -1059,10 +1195,12 @@ int uninstall_tool(int argc, char* argv[]) {
|
||||
std::string home = get_home();
|
||||
std::filesystem::path configDir = std::filesystem::path(home) / ".config/getpkg";
|
||||
std::filesystem::path binDir = std::filesystem::path(home) / ".getpkg" / toolName;
|
||||
std::filesystem::path toolInfoPath = configDir / (toolName + ".json");
|
||||
|
||||
// Check if tool is installed
|
||||
if (!std::filesystem::exists(toolInfoPath)) {
|
||||
// Initialize PackageMetadataManager
|
||||
PackageMetadataManager packageManager(configDir);
|
||||
|
||||
// Check if tool is installed using PackageMetadataManager
|
||||
if (!packageManager.packageExists(toolName)) {
|
||||
std::cerr << "Tool " << toolName << " is not installed." << std::endl;
|
||||
return 1;
|
||||
}
|
||||
@ -1079,10 +1217,8 @@ int uninstall_tool(int argc, char* argv[]) {
|
||||
std::filesystem::remove_all(binDir);
|
||||
}
|
||||
|
||||
// Remove tool info file
|
||||
if (std::filesystem::exists(toolInfoPath)) {
|
||||
std::filesystem::remove(toolInfoPath);
|
||||
}
|
||||
// Remove tool metadata
|
||||
packageManager.removePackageMetadata(toolName);
|
||||
|
||||
std::cout << "Uninstalled " << toolName << " successfully." << std::endl;
|
||||
return 0;
|
||||
@ -1103,14 +1239,15 @@ void show_help() {
|
||||
std::cout << " uninstall <tool_name> Remove an installed tool" << std::endl;
|
||||
std::cout << " Removes tool files, PATH entries, and autocomplete" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " publish <tool_name[:ARCH]> <folder> Upload a tool to getpkg.xyz" << std::endl;
|
||||
std::cout << " publish [--server <url>] <tool_name[:ARCH]> <folder>" << std::endl;
|
||||
std::cout << " Upload a tool to a package server" << std::endl;
|
||||
std::cout << " ARCH is optional (defaults to 'universal')" << std::endl;
|
||||
std::cout << " Requires SOS_WRITE_TOKEN environment variable" << std::endl;
|
||||
std::cout << " Uses default publish server if --server not specified" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " unpublish <tool_name> Remove ALL architectures of a tool" << std::endl;
|
||||
std::cout << " unpublish <tool_name:ARCH> Remove specific architecture only" << std::endl;
|
||||
std::cout << " unpublish <hash> Remove a tool by hash" << std::endl;
|
||||
std::cout << " Requires SOS_WRITE_TOKEN environment variable" << std::endl;
|
||||
std::cout << " unpublish [--server <url>] <tool_name> Remove ALL architectures of a tool" << std::endl;
|
||||
std::cout << " unpublish [--server <url>] <tool_name:ARCH> Remove specific architecture only" << std::endl;
|
||||
std::cout << " unpublish [--server <url>] <hash> Remove a tool by hash" << std::endl;
|
||||
std::cout << " Uses default publish server if --server not specified" << std::endl;
|
||||
std::cout << " Without :ARCH, removes x86_64, aarch64, and universal versions" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " update Update getpkg and all installed tools" << std::endl;
|
||||
@ -1127,6 +1264,15 @@ void show_help() {
|
||||
std::cout << " clean Clean up orphaned configs and symlinks" << std::endl;
|
||||
std::cout << " Removes unused config files and dangling symlinks" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " server add <url> Add a new package server" << std::endl;
|
||||
std::cout << " Adds a server to the configuration for package discovery" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " server remove <url> Remove a package server" << std::endl;
|
||||
std::cout << " Removes a server from the configuration" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " server list List all configured servers" << std::endl;
|
||||
std::cout << " Shows all servers with their status and write token info" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " version Show getpkg version" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << " help Show this help message" << std::endl;
|
||||
@ -1136,10 +1282,15 @@ void show_help() {
|
||||
std::cout << " getpkg install myapp Install myapp" << std::endl;
|
||||
std::cout << " getpkg publish myapp:x86_64 ./build Publish architecture-specific build" << std::endl;
|
||||
std::cout << " getpkg publish myapp ./build Publish universal build" << std::endl;
|
||||
std::cout << " getpkg publish --server example.com myapp ./build Publish to specific server" << std::endl;
|
||||
std::cout << " getpkg unpublish myapp Remove ALL architectures of myapp" << std::endl;
|
||||
std::cout << " getpkg unpublish myapp:x86_64 Remove only x86_64 version" << std::endl;
|
||||
std::cout << " getpkg unpublish --server example.com myapp Remove from specific server" << std::endl;
|
||||
std::cout << " getpkg uninstall myapp Remove myapp from system" << std::endl;
|
||||
std::cout << " getpkg update Update everything" << std::endl;
|
||||
std::cout << " getpkg server add packages.example.com Add a custom package server" << std::endl;
|
||||
std::cout << " getpkg server remove packages.example.com Remove a package server" << std::endl;
|
||||
std::cout << " getpkg server list List all configured servers" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << "ENVIRONMENT:" << std::endl;
|
||||
std::cout << " SOS_WRITE_TOKEN Auth token for publishing tools" << std::endl;
|
||||
@ -1150,6 +1301,163 @@ void show_help() {
|
||||
std::cout << " ~/.local/bin/getpkg/ Installed tool binaries" << std::endl;
|
||||
}
|
||||
|
||||
int server_command(int argc, char* argv[]) {
|
||||
if (argc < 3) {
|
||||
std::cerr << "Usage: getpkg server <add|remove|list> [args...]" << std::endl;
|
||||
std::cerr << " getpkg server add <url> Add a new server" << std::endl;
|
||||
std::cerr << " getpkg server remove <url> Remove a server" << std::endl;
|
||||
std::cerr << " getpkg server list List all configured servers" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::string subcommand = argv[2];
|
||||
ServerManager serverManager;
|
||||
|
||||
// Load existing configuration
|
||||
if (!serverManager.loadConfiguration()) {
|
||||
std::cerr << "Failed to load server configuration" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (subcommand == "add") {
|
||||
if (argc < 4) {
|
||||
std::cerr << "Usage: getpkg server add <url>" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::string serverUrl = argv[3];
|
||||
|
||||
// Validate server URL format
|
||||
if (serverUrl.empty()) {
|
||||
std::cerr << "Error: Server URL cannot be empty" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Remove protocol if provided (we'll add it internally)
|
||||
if (serverUrl.find("http://") == 0) {
|
||||
serverUrl = serverUrl.substr(7);
|
||||
} else if (serverUrl.find("https://") == 0) {
|
||||
serverUrl = serverUrl.substr(8);
|
||||
}
|
||||
|
||||
// Remove trailing slash if present
|
||||
if (!serverUrl.empty() && serverUrl.back() == '/') {
|
||||
serverUrl.pop_back();
|
||||
}
|
||||
|
||||
std::cout << "Adding server: " << serverUrl << std::endl;
|
||||
|
||||
if (serverManager.addServer(serverUrl)) {
|
||||
std::cout << "Successfully added server: " << serverUrl << std::endl;
|
||||
|
||||
// Ask if user wants to add a write token
|
||||
std::cout << "Would you like to add a write token for this server? (y/N): ";
|
||||
std::string response;
|
||||
std::getline(std::cin, response);
|
||||
|
||||
if (response == "y" || response == "Y" || response == "yes" || response == "Yes") {
|
||||
std::cout << "Enter write token for " << serverUrl << ": ";
|
||||
std::string token;
|
||||
std::getline(std::cin, token);
|
||||
|
||||
if (!token.empty()) {
|
||||
if (serverManager.setWriteToken(serverUrl, token)) {
|
||||
std::cout << "Write token added successfully" << std::endl;
|
||||
} else {
|
||||
std::cerr << "Failed to save write token" << std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
} else {
|
||||
std::cerr << "Failed to add server: " << serverUrl << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
} else if (subcommand == "remove") {
|
||||
if (argc < 4) {
|
||||
std::cerr << "Usage: getpkg server remove <url>" << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::string serverUrl = argv[3];
|
||||
|
||||
// Remove protocol if provided
|
||||
if (serverUrl.find("http://") == 0) {
|
||||
serverUrl = serverUrl.substr(7);
|
||||
} else if (serverUrl.find("https://") == 0) {
|
||||
serverUrl = serverUrl.substr(8);
|
||||
}
|
||||
|
||||
// Remove trailing slash if present
|
||||
if (!serverUrl.empty() && serverUrl.back() == '/') {
|
||||
serverUrl.pop_back();
|
||||
}
|
||||
|
||||
std::cout << "Removing server: " << serverUrl << std::endl;
|
||||
|
||||
if (serverManager.removeServer(serverUrl)) {
|
||||
std::cout << "Successfully removed server: " << serverUrl << std::endl;
|
||||
return 0;
|
||||
} else {
|
||||
std::cerr << "Failed to remove server: " << serverUrl << std::endl;
|
||||
return 1;
|
||||
}
|
||||
|
||||
} else if (subcommand == "list") {
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
|
||||
if (servers.empty()) {
|
||||
std::cout << "No servers configured" << std::endl;
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::cout << std::endl;
|
||||
std::cout << "Configured servers:" << std::endl;
|
||||
std::cout << "+" << std::string(30, '-') << "+" << std::string(12, '-') << "+" << std::string(15, '-') << "+" << std::endl;
|
||||
std::cout << "|" << std::setw(30) << std::left << " Server URL"
|
||||
<< "|" << std::setw(12) << std::left << " Default"
|
||||
<< "|" << std::setw(15) << std::left << " Write Token"
|
||||
<< "|" << std::endl;
|
||||
std::cout << "+" << std::string(30, '-') << "+" << std::string(12, '-') << "+" << std::string(15, '-') << "+" << std::endl;
|
||||
|
||||
std::string defaultServer = serverManager.getDefaultServer();
|
||||
|
||||
for (const auto& server : servers) {
|
||||
bool isDefault = (server == defaultServer);
|
||||
bool hasToken = serverManager.hasWriteToken(server);
|
||||
|
||||
std::string displayUrl = server;
|
||||
if (displayUrl.length() > 29) {
|
||||
displayUrl = displayUrl.substr(0, 26) + "...";
|
||||
}
|
||||
|
||||
std::cout << "|" << std::setw(30) << std::left << (" " + displayUrl)
|
||||
<< "|" << std::setw(12) << std::left << (isDefault ? " Yes" : " No")
|
||||
<< "|" << std::setw(15) << std::left << (hasToken ? " Yes" : " No")
|
||||
<< "|" << std::endl;
|
||||
}
|
||||
|
||||
std::cout << "+" << std::string(30, '-') << "+" << std::string(12, '-') << "+" << std::string(15, '-') << "+" << std::endl;
|
||||
std::cout << std::endl;
|
||||
std::cout << "Total servers: " << servers.size() << std::endl;
|
||||
|
||||
// Show default publish server if different from default
|
||||
std::string defaultPublishServer = serverManager.getDefaultPublishServer();
|
||||
if (defaultPublishServer != defaultServer) {
|
||||
std::cout << "Default publish server: " << defaultPublishServer << std::endl;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
} else {
|
||||
std::cerr << "Unknown server subcommand: " << subcommand << std::endl;
|
||||
std::cerr << "Use 'getpkg server' for usage information." << std::endl;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
int autocomplete_command(int argc, char* argv[]) {
|
||||
std::vector<std::string> args(argv + 2, argv + argc);
|
||||
|
||||
@ -1165,6 +1473,7 @@ int autocomplete_command(int argc, char* argv[]) {
|
||||
std::cout << "hash\n";
|
||||
std::cout << "list\n";
|
||||
std::cout << "clean\n";
|
||||
std::cout << "server\n";
|
||||
std::cout << "help\n";
|
||||
return 0;
|
||||
}
|
||||
@ -1179,6 +1488,35 @@ int autocomplete_command(int argc, char* argv[]) {
|
||||
} else if (subcommand == "uninstall") {
|
||||
// For uninstall, list installed tools
|
||||
std::filesystem::path configDir = std::filesystem::path(std::getenv("HOME")) / ".config" / "getpkg";
|
||||
if (std::filesystem::exists(configDir)) {
|
||||
for (const auto& entry : std::filesystem::directory_iterator(configDir)) {
|
||||
if (entry.path().extension() == ".json") {
|
||||
std::cout << entry.path().stem().string() << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
} else if (subcommand == "server") {
|
||||
// Handle server subcommand autocompletion
|
||||
if (args.size() == 1) {
|
||||
// Show server subcommands
|
||||
std::cout << "add\n";
|
||||
std::cout << "remove\n";
|
||||
std::cout << "list\n";
|
||||
} else if (args.size() == 2 && args[1] == "remove") {
|
||||
// For server remove, list configured servers
|
||||
ServerManager serverManager;
|
||||
if (serverManager.loadConfiguration()) {
|
||||
std::vector<std::string> servers = serverManager.getServers();
|
||||
for (const auto& server : servers) {
|
||||
std::cout << server << "\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
} else if (subcommand == "unpublish") {
|
||||
// For unpublish, we could suggest installed tools
|
||||
std::filesystem::path configDir = std::filesystem::path(std::getenv("HOME")) / ".config" / "getpkg";
|
||||
if (std::filesystem::exists(configDir)) {
|
||||
for (const auto& entry : std::filesystem::directory_iterator(configDir)) {
|
||||
if (entry.path().extension() == ".json") {
|
||||
@ -1229,9 +1567,76 @@ int autocomplete_command(int argc, char* argv[]) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Migration check and execution
|
||||
bool checkAndPerformMigration() {
|
||||
try {
|
||||
MigrationManager migrationManager;
|
||||
|
||||
if (migrationManager.needsMigration()) {
|
||||
std::cout << "Migrating getpkg configuration to multi-server format..." << std::endl;
|
||||
|
||||
if (migrationManager.performMigration()) {
|
||||
auto result = migrationManager.getLastMigrationResult();
|
||||
std::cout << "Migration completed successfully!" << std::endl;
|
||||
|
||||
if (result.migratedPackages > 0) {
|
||||
std::cout << " - Migrated " << result.migratedPackages << " package(s)" << std::endl;
|
||||
}
|
||||
if (result.serverConfigMigrated) {
|
||||
std::cout << " - Updated server configuration" << std::endl;
|
||||
}
|
||||
if (result.packageDirectoryCreated) {
|
||||
std::cout << " - Created packages directory structure" << std::endl;
|
||||
}
|
||||
|
||||
if (!result.warnings.empty()) {
|
||||
std::cout << "Migration warnings:" << std::endl;
|
||||
for (const auto& warning : result.warnings) {
|
||||
std::cout << " - " << warning << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
} else {
|
||||
auto result = migrationManager.getLastMigrationResult();
|
||||
std::cerr << "Migration failed!" << std::endl;
|
||||
|
||||
if (!result.errors.empty()) {
|
||||
std::cerr << "Migration errors:" << std::endl;
|
||||
for (const auto& error : result.errors) {
|
||||
std::cerr << " - " << error << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
if (migrationManager.canRollback()) {
|
||||
std::cerr << "Attempting rollback..." << std::endl;
|
||||
if (migrationManager.performRollback()) {
|
||||
std::cerr << "Rollback successful. Configuration restored to previous state." << std::endl;
|
||||
} else {
|
||||
std::cerr << "Rollback failed. Manual intervention may be required." << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true; // No migration needed
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Migration error: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
} // end anonymous namespace
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
// Perform migration check before any other operations
|
||||
if (!checkAndPerformMigration()) {
|
||||
std::cerr << "Failed to migrate configuration. Some functionality may not work correctly." << std::endl;
|
||||
// Continue execution but warn user
|
||||
}
|
||||
if (argc < 2) {
|
||||
show_help();
|
||||
return 0;
|
||||
@ -1259,6 +1664,8 @@ int main(int argc, char* argv[]) {
|
||||
return list_packages(argc, argv);
|
||||
} else if (command == "clean") {
|
||||
return clean_tool(argc, argv);
|
||||
} else if (command == "server") {
|
||||
return server_command(argc, argv);
|
||||
} else if (command == "help") {
|
||||
show_help();
|
||||
} else {
|
||||
|
@ -1 +0,0 @@
|
||||
test
|
@ -1,7 +0,0 @@
|
||||
#\!/bin/bash
|
||||
if [ "$1" = "version" ]; then
|
||||
echo "1.0.0"
|
||||
elif [ "$1" = "autocomplete" ]; then
|
||||
echo "help"
|
||||
echo "version"
|
||||
fi
|
@ -1,7 +0,0 @@
|
||||
#\!/bin/bash
|
||||
if [ "$1" = "version" ]; then
|
||||
echo "1.0.0"
|
||||
elif [ "$1" = "autocomplete" ]; then
|
||||
echo "help"
|
||||
echo "version"
|
||||
fi
|
@ -44,8 +44,8 @@ cleanup() {
|
||||
|
||||
# Remove local test directories
|
||||
rm -rf "$TEST_DIR"
|
||||
rm -rf ~/.config/getpkg/"${TEST_TOOL_NAME}.json" 2>/dev/null || true
|
||||
rm -rf ~/.config/getpkg/"${TEST_TOOL_NAME}-noarch.json" 2>/dev/null || true
|
||||
rm -rf ~/.config/getpkg/packages/"${TEST_TOOL_NAME}.json" 2>/dev/null || true
|
||||
rm -rf ~/.config/getpkg/packages/"${TEST_TOOL_NAME}-noarch.json" 2>/dev/null || true
|
||||
rm -rf ~/.getpkg/"${TEST_TOOL_NAME}" 2>/dev/null || true
|
||||
rm -rf ~/.getpkg/"${TEST_TOOL_NAME}-noarch" 2>/dev/null || true
|
||||
rm -rf ~/.local/bin/getpkg/"${TEST_TOOL_NAME}" 2>/dev/null || true
|
||||
@ -68,6 +68,28 @@ cleanup() {
|
||||
# Clean up noarch variant
|
||||
$GETPKG unpublish "${TEST_TOOL_NAME}-noarch:universal" 2>/dev/null || true
|
||||
|
||||
# Clean up any remaining test packages that start with "test-"
|
||||
echo "Cleaning up any remaining test packages..."
|
||||
DIR_RESPONSE=$(curl -s "https://getpkg.xyz/dir" 2>/dev/null || echo "")
|
||||
if [ -n "$DIR_RESPONSE" ]; then
|
||||
# Extract test package labeltags from JSON response
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | jq -r '.entries[]?.labeltags[]? // empty' 2>/dev/null | grep "^test-" | sort -u || echo "")
|
||||
else
|
||||
# Fallback: extract labeltags using grep and sed
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | grep -o '"test-[^"]*"' | sed 's/"//g' | sort -u || echo "")
|
||||
fi
|
||||
|
||||
if [ -n "$TEST_PACKAGES" ]; then
|
||||
echo "$TEST_PACKAGES" | while read -r package; do
|
||||
if [ -n "$package" ]; then
|
||||
echo " Cleaning up orphaned test package: $package"
|
||||
$GETPKG unpublish "$package" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Cleaned up test tools from getpkg.xyz"
|
||||
else
|
||||
echo "Note: SOS_WRITE_TOKEN not set, cannot clean up remote test objects"
|
||||
@ -273,7 +295,7 @@ if [ -n "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
echo "Publish command failed with no output. Checking for missing dependencies..."
|
||||
ldd "$GETPKG" 2>&1 | grep "not found" || echo "All dependencies found"
|
||||
fi
|
||||
if [[ "$PUBLISH_OUTPUT" =~ Published! ]] && [[ "$PUBLISH_OUTPUT" =~ URL: ]] && [[ "$PUBLISH_OUTPUT" =~ Hash: ]]; then
|
||||
if [[ "$PUBLISH_OUTPUT" =~ Published.*! ]] && [[ "$PUBLISH_OUTPUT" =~ URL: ]] && [[ "$PUBLISH_OUTPUT" =~ Hash: ]]; then
|
||||
print_test_result "Publish tool with ARCH to getpkg.xyz" 0
|
||||
|
||||
# Extract hash for later cleanup
|
||||
@ -283,8 +305,20 @@ if [ -n "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
# Test 8: Check if published tool exists
|
||||
echo -e "\nTest 8: Check published tool exists"
|
||||
EXISTS_CHECK=$(curl -s "https://getpkg.xyz/exists/${TEST_TOOL_NAME}:${TEST_ARCH}" 2>/dev/null || echo "error")
|
||||
if [[ "$EXISTS_CHECK" != "error" ]] && [[ "$EXISTS_CHECK" != "false" ]]; then
|
||||
print_test_result "Published tool exists on server" 0
|
||||
# Parse JSON response to check if exists is true
|
||||
if [[ "$EXISTS_CHECK" != "error" ]]; then
|
||||
# Try to parse JSON response
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
EXISTS_VALUE=$(echo "$EXISTS_CHECK" | jq -r '.exists' 2>/dev/null || echo "false")
|
||||
else
|
||||
# Fallback: extract exists value using grep/sed
|
||||
EXISTS_VALUE=$(echo "$EXISTS_CHECK" | grep -o '"exists":[^,}]*' | sed 's/.*:\s*//' | tr -d ' ' || echo "false")
|
||||
fi
|
||||
if [[ "$EXISTS_VALUE" == "true" ]]; then
|
||||
print_test_result "Published tool exists on server" 0
|
||||
else
|
||||
print_test_result "Published tool exists on server" 1
|
||||
fi
|
||||
else
|
||||
print_test_result "Published tool exists on server" 1
|
||||
fi
|
||||
@ -297,7 +331,7 @@ if [ -n "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
|
||||
# Test 10: Check installed files
|
||||
echo -e "\nTest 10: Check installed files"
|
||||
if [ -f ~/.config/getpkg/"${TEST_TOOL_NAME}.json" ] && [ -d ~/.getpkg/"${TEST_TOOL_NAME}" ] && [ -L ~/.local/bin/getpkg/"${TEST_TOOL_NAME}" ]; then
|
||||
if [ -f ~/.config/getpkg/packages/"${TEST_TOOL_NAME}.json" ] && [ -d ~/.getpkg/"${TEST_TOOL_NAME}" ] && [ -L ~/.local/bin/getpkg/"${TEST_TOOL_NAME}" ]; then
|
||||
print_test_result "Tool files installed correctly" 0
|
||||
else
|
||||
print_test_result "Tool files installed correctly" 1
|
||||
@ -316,7 +350,7 @@ if [ -n "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
# First remove the tool
|
||||
rm -rf ~/.getpkg/"${TEST_TOOL_NAME}"
|
||||
rm -rf ~/.local/bin/getpkg/"${TEST_TOOL_NAME}"
|
||||
rm -f ~/.config/getpkg/"${TEST_TOOL_NAME}.json"
|
||||
rm -f ~/.config/getpkg/packages/"${TEST_TOOL_NAME}.json"
|
||||
|
||||
REINSTALL_OUTPUT=$(timeout 3 "$GETPKG" install "$TEST_TOOL_NAME" 2>&1) || REINSTALL_OUTPUT=""
|
||||
if [[ "$REINSTALL_OUTPUT" =~ Installed\ ${TEST_TOOL_NAME}\ successfully ]] || [[ "$REINSTALL_OUTPUT" =~ ${TEST_TOOL_NAME}\ is\ already\ up\ to\ date ]]; then
|
||||
@ -372,7 +406,7 @@ EOF
|
||||
chmod +x "${TEST_DIR}/${TEST_TOOL_NOARCH}/${TEST_TOOL_NOARCH}"
|
||||
|
||||
PUBLISH_NOARCH_OUTPUT=$(timeout 3 "$GETPKG" publish "${TEST_TOOL_NOARCH}" "${TEST_DIR}/${TEST_TOOL_NOARCH}" 2>&1) || PUBLISH_NOARCH_OUTPUT=""
|
||||
if [[ "$PUBLISH_NOARCH_OUTPUT" =~ Published! ]] && [[ "$PUBLISH_NOARCH_OUTPUT" =~ URL: ]] && [[ "$PUBLISH_NOARCH_OUTPUT" =~ Hash: ]]; then
|
||||
if [[ "$PUBLISH_NOARCH_OUTPUT" =~ Published.*! ]] && [[ "$PUBLISH_NOARCH_OUTPUT" =~ URL: ]] && [[ "$PUBLISH_NOARCH_OUTPUT" =~ Hash: ]]; then
|
||||
print_test_result "Publish tool without ARCH" 0
|
||||
else
|
||||
print_test_result "Publish tool without ARCH" 1
|
||||
@ -380,13 +414,13 @@ EOF
|
||||
|
||||
# Test 13c: Install universal tool (arch fallback)
|
||||
echo -e "\nTest 13c: Install universal tool (arch fallback)"
|
||||
rm -rf ~/.config/getpkg/"${TEST_TOOL_NOARCH}.json" ~/.getpkg/"${TEST_TOOL_NOARCH}" ~/.local/bin/getpkg/"${TEST_TOOL_NOARCH}" 2>/dev/null || true
|
||||
rm -rf ~/.config/getpkg/packages/"${TEST_TOOL_NOARCH}.json" ~/.getpkg/"${TEST_TOOL_NOARCH}" ~/.local/bin/getpkg/"${TEST_TOOL_NOARCH}" 2>/dev/null || true
|
||||
FALLBACK_INSTALL_OUTPUT=$(timeout 3 "$GETPKG" install "${TEST_TOOL_NOARCH}" 2>&1) || FALLBACK_INSTALL_OUTPUT=""
|
||||
|
||||
# Check if tool was installed successfully and has universal architecture
|
||||
if [[ "$FALLBACK_INSTALL_OUTPUT" =~ Installed\ ${TEST_TOOL_NOARCH}\ successfully ]] && [ -f ~/.config/getpkg/"${TEST_TOOL_NOARCH}.json" ]; then
|
||||
if [[ "$FALLBACK_INSTALL_OUTPUT" =~ Installed\ ${TEST_TOOL_NOARCH}\ successfully ]] && [ -f ~/.config/getpkg/packages/"${TEST_TOOL_NOARCH}.json" ]; then
|
||||
# Verify the architecture is "universal" in the config file
|
||||
INSTALLED_ARCH=$(grep -o '"arch"[[:space:]]*:[[:space:]]*"[^"]*"' ~/.config/getpkg/"${TEST_TOOL_NOARCH}.json" | sed 's/.*"\([^"]*\)".*/\1/')
|
||||
INSTALLED_ARCH=$(grep -o '"arch"[[:space:]]*:[[:space:]]*"[^"]*"' ~/.config/getpkg/packages/"${TEST_TOOL_NOARCH}.json" | sed 's/.*"\([^"]*\)".*/\1/')
|
||||
if [ "$INSTALLED_ARCH" = "universal" ]; then
|
||||
print_test_result "Install universal tool with arch fallback" 0
|
||||
|
||||
@ -407,10 +441,19 @@ EOF
|
||||
fi
|
||||
|
||||
# Clean up the noarch tool from server
|
||||
NOARCH_HASH=$(curl -s "https://getpkg.xyz/hash/${TEST_TOOL_NOARCH}" 2>/dev/null || echo "")
|
||||
if [ -n "$NOARCH_HASH" ] && [ "$NOARCH_HASH" != "null" ] && [ "$NOARCH_HASH" != "Not found" ]; then
|
||||
curl -s -H "Authorization: Bearer ${SOS_WRITE_TOKEN}" \
|
||||
"https://getpkg.xyz/deleteobject?hash=${NOARCH_HASH}" >/dev/null 2>&1 || true
|
||||
NOARCH_HASH_RESPONSE=$(curl -s "https://getpkg.xyz/hash/${TEST_TOOL_NOARCH}" 2>/dev/null || echo "")
|
||||
if [ -n "$NOARCH_HASH_RESPONSE" ]; then
|
||||
# Parse JSON response to extract hash
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
NOARCH_HASH=$(echo "$NOARCH_HASH_RESPONSE" | jq -r '.hash' 2>/dev/null || echo "")
|
||||
else
|
||||
# Fallback: extract hash value using grep/sed
|
||||
NOARCH_HASH=$(echo "$NOARCH_HASH_RESPONSE" | grep -o '"hash":"[^"]*"' | sed 's/.*"hash":"\([^"]*\)".*/\1/' || echo "")
|
||||
fi
|
||||
if [ -n "$NOARCH_HASH" ] && [ "$NOARCH_HASH" != "null" ]; then
|
||||
curl -s -H "Authorization: Bearer ${SOS_WRITE_TOKEN}" \
|
||||
"https://getpkg.xyz/deleteobject?hash=${NOARCH_HASH}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo -e "\n${YELLOW}Skipping publish/install tests (SOS_WRITE_TOKEN not set)${NC}"
|
||||
@ -444,7 +487,7 @@ EOF
|
||||
|
||||
# Publish and install the tool
|
||||
PUBLISH_OUTPUT=$(timeout 3 "$GETPKG" publish "${TEST_UNINSTALL_TOOL}:${TEST_ARCH}" "$UNINSTALL_DIR" 2>&1)
|
||||
if [[ "$PUBLISH_OUTPUT" =~ Published! ]]; then
|
||||
if [[ "$PUBLISH_OUTPUT" =~ Published.*! ]]; then
|
||||
INSTALL_OUTPUT=$(timeout 3 "$GETPKG" install "$TEST_UNINSTALL_TOOL" 2>&1)
|
||||
if [[ "$INSTALL_OUTPUT" =~ Installed\ ${TEST_UNINSTALL_TOOL}\ successfully ]]; then
|
||||
# Count bashrc entries before uninstall
|
||||
@ -457,7 +500,7 @@ EOF
|
||||
SYMLINK_EXISTS=false
|
||||
# HELPER_SYMLINK_EXISTS=false
|
||||
|
||||
[ -f ~/.config/getpkg/"${TEST_UNINSTALL_TOOL}.json" ] && CONFIG_EXISTS=true
|
||||
[ -f ~/.config/getpkg/packages/"${TEST_UNINSTALL_TOOL}.json" ] && CONFIG_EXISTS=true
|
||||
[ -d ~/.getpkg/"$TEST_UNINSTALL_TOOL" ] && TOOL_DIR_EXISTS=true
|
||||
[ -L ~/.local/bin/getpkg/"$TEST_UNINSTALL_TOOL" ] && SYMLINK_EXISTS=true
|
||||
# Check if helper symlink exists (not currently used in validation)
|
||||
@ -471,7 +514,7 @@ EOF
|
||||
ALL_REMOVED=true
|
||||
|
||||
# Check config file removed
|
||||
if [ -f ~/.config/getpkg/"${TEST_UNINSTALL_TOOL}.json" ]; then
|
||||
if [ -f ~/.config/getpkg/packages/"${TEST_UNINSTALL_TOOL}.json" ]; then
|
||||
echo "ERROR: Config file still exists after uninstall"
|
||||
ALL_REMOVED=false
|
||||
fi
|
||||
@ -524,7 +567,7 @@ EOF
|
||||
fi
|
||||
|
||||
# Always cleanup test uninstall tool from server, even if test failed
|
||||
if [[ "$PUBLISH_OUTPUT" =~ Published! ]]; then
|
||||
if [[ "$PUBLISH_OUTPUT" =~ Published.*! ]]; then
|
||||
$GETPKG unpublish "${TEST_UNINSTALL_TOOL}:${TEST_ARCH}" 2>/dev/null || true
|
||||
fi
|
||||
fi
|
||||
@ -555,7 +598,7 @@ echo "Multi-arch unpublish test"' > "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_MULTI"
|
||||
PUBLISH_aarch64_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_MULTI}:aarch64" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
PUBLISH_universal_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_MULTI}:universal" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
|
||||
if [[ "$PUBLISH_x86_64_OUTPUT" =~ Published! ]] && [[ "$PUBLISH_aarch64_OUTPUT" =~ Published! ]] && [[ "$PUBLISH_universal_OUTPUT" =~ Published! ]]; then
|
||||
if [[ "$PUBLISH_x86_64_OUTPUT" =~ Published.*! ]] && [[ "$PUBLISH_aarch64_OUTPUT" =~ Published.*! ]] && [[ "$PUBLISH_universal_OUTPUT" =~ Published.*! ]]; then
|
||||
# Test robust unpublish - should remove ALL architectures
|
||||
sleep 1 # Give server time to process all publishes
|
||||
UNPUBLISH_OUTPUT=$("$GETPKG" unpublish "$UNPUBLISH_TOOL_MULTI" 2>&1)
|
||||
@ -585,7 +628,7 @@ echo "Universal arch unpublish test"' > "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_CUS
|
||||
# Publish with universal architecture
|
||||
PUBLISH_CUSTOM_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_CUSTOM}:universal" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
|
||||
if [[ "$PUBLISH_CUSTOM_OUTPUT" =~ Published! ]]; then
|
||||
if [[ "$PUBLISH_CUSTOM_OUTPUT" =~ Published.*! ]]; then
|
||||
# Test that unpublish can find and remove custom tags
|
||||
UNPUBLISH_CUSTOM_OUTPUT=$("$GETPKG" unpublish "$UNPUBLISH_TOOL_CUSTOM" 2>&1)
|
||||
UNPUBLISH_CUSTOM_EXIT_CODE=$?
|
||||
|
45
getpkg/test/CMakeLists.txt
Normal file
45
getpkg/test/CMakeLists.txt
Normal file
@ -0,0 +1,45 @@
|
||||
# Unit Tests for getpkg multi-server support
|
||||
cmake_minimum_required(VERSION 3.16)
|
||||
|
||||
# Test project setup
|
||||
project(getpkg_tests VERSION 1.0.0 LANGUAGES CXX)
|
||||
|
||||
# Build configuration
|
||||
set(CMAKE_CXX_STANDARD 23)
|
||||
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
||||
set(CMAKE_EXE_LINKER_FLAGS "-static")
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
|
||||
set(BUILD_SHARED_LIBS OFF)
|
||||
set(CMAKE_PREFIX_PATH /usr/local)
|
||||
|
||||
# Find packages
|
||||
find_package(nlohmann_json REQUIRED)
|
||||
|
||||
# Add module path for FindCPRStatic
|
||||
list(APPEND CMAKE_MODULE_PATH "/usr/local/share/cmake/Modules")
|
||||
find_package(CPRStatic REQUIRED)
|
||||
|
||||
# Include directories
|
||||
include_directories(../src)
|
||||
include_directories(../src/common)
|
||||
|
||||
# Source files from main project (excluding main.cpp)
|
||||
file(GLOB_RECURSE MAIN_SOURCES "../src/*.cpp")
|
||||
list(REMOVE_ITEM MAIN_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/../src/main.cpp")
|
||||
|
||||
# Test source files
|
||||
file(GLOB_RECURSE TEST_SOURCES "*.cpp")
|
||||
|
||||
# Create test executable
|
||||
add_executable(getpkg_tests ${MAIN_SOURCES} ${TEST_SOURCES})
|
||||
|
||||
# Link libraries
|
||||
target_link_libraries(getpkg_tests PRIVATE
|
||||
nlohmann_json::nlohmann_json
|
||||
cpr::cpr_static)
|
||||
|
||||
# Enable testing
|
||||
enable_testing()
|
||||
|
||||
# Add test
|
||||
add_test(NAME unit_tests COMMAND getpkg_tests)
|
14
getpkg/test/Dockerfile.test-build
Normal file
14
getpkg/test/Dockerfile.test-build
Normal file
@ -0,0 +1,14 @@
|
||||
FROM gitea.jde.nz/public/dropshell-build-base:latest
|
||||
|
||||
# Copy source files
|
||||
COPY . /app/
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app/test
|
||||
|
||||
# Build tests
|
||||
RUN cmake -G Ninja -S . -B ./build -DCMAKE_BUILD_TYPE=Debug -DCMAKE_PREFIX_PATH=/usr/local
|
||||
RUN cmake --build ./build
|
||||
|
||||
# Run tests
|
||||
CMD ["./build/getpkg_tests"]
|
37
getpkg/test/build_and_run_tests.sh
Executable file
37
getpkg/test/build_and_run_tests.sh
Executable file
@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
|
||||
|
||||
echo "Building and running unit tests for getpkg multi-server support..."
|
||||
|
||||
# Create a temporary Dockerfile for building tests
|
||||
cat > "$SCRIPT_DIR/Dockerfile.test-build" << 'EOF'
|
||||
FROM gitea.jde.nz/public/dropshell-build-base:latest
|
||||
|
||||
# Copy source files
|
||||
COPY . /app/
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app/test
|
||||
|
||||
# Build tests
|
||||
RUN cmake -G Ninja -S . -B ./build -DCMAKE_BUILD_TYPE=Debug -DCMAKE_PREFIX_PATH=/usr/local
|
||||
RUN cmake --build ./build
|
||||
|
||||
# Run tests
|
||||
CMD ["./build/getpkg_tests"]
|
||||
EOF
|
||||
|
||||
echo "Building test container..."
|
||||
docker build -t getpkg-test-build -f "$SCRIPT_DIR/Dockerfile.test-build" "$PROJECT_DIR"
|
||||
|
||||
echo "Running unit tests..."
|
||||
docker run --rm getpkg-test-build
|
||||
|
||||
echo "Cleaning up..."
|
||||
rm -f "$SCRIPT_DIR/Dockerfile.test-build"
|
||||
|
||||
echo "Unit tests completed!"
|
91
getpkg/test/test_framework.hpp
Normal file
91
getpkg/test/test_framework.hpp
Normal file
@ -0,0 +1,91 @@
|
||||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <functional>
|
||||
#include <stdexcept>
|
||||
|
||||
// Simple test framework
|
||||
class TestRunner {
|
||||
public:
|
||||
using TestFunction = std::function<void()>;
|
||||
|
||||
static TestRunner& instance() {
|
||||
static TestRunner runner;
|
||||
return runner;
|
||||
}
|
||||
|
||||
void addTest(const std::string& name, TestFunction test) {
|
||||
tests_.push_back({name, test});
|
||||
}
|
||||
|
||||
int runAllTests() {
|
||||
int passed = 0;
|
||||
int failed = 0;
|
||||
|
||||
std::cout << "Running " << tests_.size() << " tests...\n\n";
|
||||
|
||||
for (const auto& test : tests_) {
|
||||
try {
|
||||
test.second();
|
||||
std::cout << "[PASS] " << test.first << std::endl;
|
||||
passed++;
|
||||
} catch (const std::exception& e) {
|
||||
std::cout << "[FAIL] " << test.first << " - " << e.what() << std::endl;
|
||||
failed++;
|
||||
} catch (...) {
|
||||
std::cout << "[FAIL] " << test.first << " - Unknown error" << std::endl;
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << "\nResults: " << passed << " passed, " << failed << " failed\n";
|
||||
return failed;
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<std::pair<std::string, TestFunction>> tests_;
|
||||
};
|
||||
|
||||
// Test assertion macros
|
||||
#define ASSERT_TRUE(condition) \
|
||||
if (!(condition)) { \
|
||||
throw std::runtime_error("Assertion failed: " #condition); \
|
||||
}
|
||||
|
||||
#define ASSERT_FALSE(condition) \
|
||||
if (condition) { \
|
||||
throw std::runtime_error("Assertion failed: " #condition " should be false"); \
|
||||
}
|
||||
|
||||
#define ASSERT_EQ(expected, actual) \
|
||||
if ((expected) != (actual)) { \
|
||||
throw std::runtime_error("Assertion failed: expected != actual"); \
|
||||
}
|
||||
|
||||
#define ASSERT_STR_EQ(expected, actual) \
|
||||
if ((expected) != (actual)) { \
|
||||
throw std::runtime_error("Assertion failed: expected '" + std::string(expected) + "' but got '" + std::string(actual) + "'"); \
|
||||
}
|
||||
|
||||
#define ASSERT_NOT_EMPTY(str) \
|
||||
if ((str).empty()) { \
|
||||
throw std::runtime_error("Assertion failed: string should not be empty"); \
|
||||
}
|
||||
|
||||
#define ASSERT_GE(actual, expected) \
|
||||
if ((actual) < (expected)) { \
|
||||
throw std::runtime_error("Assertion failed: expected " + std::to_string(actual) + " >= " + std::to_string(expected)); \
|
||||
}
|
||||
|
||||
// Test registration macro
|
||||
#define TEST(suite, name) \
|
||||
void test_##suite##_##name(); \
|
||||
struct TestRegistrar_##suite##_##name { \
|
||||
TestRegistrar_##suite##_##name() { \
|
||||
TestRunner::instance().addTest(#suite "::" #name, test_##suite##_##name); \
|
||||
} \
|
||||
}; \
|
||||
static TestRegistrar_##suite##_##name registrar_##suite##_##name; \
|
||||
void test_##suite##_##name()
|
343
getpkg/test/test_getbin_client.cpp
Normal file
343
getpkg/test/test_getbin_client.cpp
Normal file
@ -0,0 +1,343 @@
|
||||
#include "GetbinClient.hpp"
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
|
||||
// Test framework declarations from test_main.cpp
|
||||
class TestRunner {
|
||||
public:
|
||||
static TestRunner& instance();
|
||||
void addTest(const std::string& name, std::function<void()> test);
|
||||
};
|
||||
|
||||
#define ASSERT_TRUE(condition) \
|
||||
if (!(condition)) { \
|
||||
throw std::runtime_error("Assertion failed: " #condition); \
|
||||
}
|
||||
|
||||
#define ASSERT_FALSE(condition) \
|
||||
if (condition) { \
|
||||
throw std::runtime_error("Assertion failed: " #condition " should be false"); \
|
||||
}
|
||||
|
||||
#define ASSERT_EQ(expected, actual) \
|
||||
if ((expected) != (actual)) { \
|
||||
throw std::runtime_error("Assertion failed: expected != actual"); \
|
||||
}
|
||||
|
||||
#define ASSERT_STR_EQ(expected, actual) \
|
||||
if ((expected) != (actual)) { \
|
||||
throw std::runtime_error("Assertion failed: expected '" + std::string(expected) + "' but got '" + std::string(actual) + "'"); \
|
||||
}
|
||||
|
||||
#define ASSERT_NOT_EMPTY(str) \
|
||||
if ((str).empty()) { \
|
||||
throw std::runtime_error("Assertion failed: string should not be empty"); \
|
||||
}
|
||||
|
||||
#define TEST(name) \
|
||||
void test_GetbinClient_##name(); \
|
||||
void register_GetbinClient_##name() { \
|
||||
TestRunner::instance().addTest("GetbinClient::" #name, test_GetbinClient_##name); \
|
||||
} \
|
||||
void test_GetbinClient_##name()
|
||||
|
||||
// Test helper class for GetbinClient testing
|
||||
class GetbinClientTestHelper {
|
||||
public:
|
||||
static std::filesystem::path createTempDir() {
|
||||
auto tempDir = std::filesystem::temp_directory_path() / "getpkg_client_test" / std::to_string(std::time(nullptr));
|
||||
std::filesystem::create_directories(tempDir);
|
||||
return tempDir;
|
||||
}
|
||||
|
||||
static void cleanupTempDir(const std::filesystem::path& dir) {
|
||||
if (std::filesystem::exists(dir)) {
|
||||
std::filesystem::remove_all(dir);
|
||||
}
|
||||
}
|
||||
|
||||
static void createTestFile(const std::filesystem::path& path, const std::string& content) {
|
||||
std::ofstream file(path);
|
||||
file << content;
|
||||
file.close();
|
||||
}
|
||||
|
||||
static std::string readTestFile(const std::filesystem::path& path) {
|
||||
std::ifstream file(path);
|
||||
std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
|
||||
return content;
|
||||
}
|
||||
|
||||
// Mock progress callback for testing
|
||||
static bool mockProgressCallback(size_t downloaded, size_t total) {
|
||||
return true; // Continue download
|
||||
}
|
||||
|
||||
static bool cancelProgressCallback(size_t downloaded, size_t total) {
|
||||
return false; // Cancel download
|
||||
}
|
||||
};
|
||||
|
||||
TEST(DefaultConstructor) {
|
||||
GetbinClient client;
|
||||
|
||||
// Should work with default server
|
||||
ASSERT_NOT_EMPTY(client.getLastError()); // Initially empty, but method should exist
|
||||
}
|
||||
|
||||
TEST(MultiServerConstructor) {
|
||||
std::vector<std::string> servers = {"server1.com", "server2.com", "server3.com"};
|
||||
GetbinClient client(servers);
|
||||
|
||||
// Should initialize with multiple servers
|
||||
ASSERT_NOT_EMPTY(client.getLastError()); // Method should exist
|
||||
}
|
||||
|
||||
TEST(EmptyServerList) {
|
||||
std::vector<std::string> emptyServers;
|
||||
GetbinClient client(emptyServers);
|
||||
|
||||
// Should handle empty server list gracefully
|
||||
ASSERT_NOT_EMPTY(client.getLastError());
|
||||
}
|
||||
|
||||
TEST(NetworkErrorClassification) {
|
||||
GetbinClient client;
|
||||
|
||||
// Test error message generation
|
||||
ASSERT_NOT_EMPTY(client.getNetworkErrorMessage(NetworkError::ConnectionFailed));
|
||||
ASSERT_NOT_EMPTY(client.getNetworkErrorMessage(NetworkError::Timeout));
|
||||
ASSERT_NOT_EMPTY(client.getNetworkErrorMessage(NetworkError::NotFound));
|
||||
ASSERT_NOT_EMPTY(client.getNetworkErrorMessage(NetworkError::Unauthorized));
|
||||
ASSERT_NOT_EMPTY(client.getNetworkErrorMessage(NetworkError::ServerError));
|
||||
}
|
||||
|
||||
TEST(UrlBuilding) {
|
||||
GetbinClient client;
|
||||
|
||||
// Test URL building with different server formats
|
||||
// Note: This tests internal functionality, so we test through public methods
|
||||
std::string hash;
|
||||
|
||||
// Test with different server URL formats
|
||||
bool result1 = client.getHash("http://server.com", "test-tool", "x86_64", hash);
|
||||
bool result2 = client.getHash("https://server.com", "test-tool", "x86_64", hash);
|
||||
bool result3 = client.getHash("server.com", "test-tool", "x86_64", hash);
|
||||
|
||||
// These will fail due to network, but should not crash
|
||||
ASSERT_FALSE(result1 || result2 || result3); // All should fail gracefully
|
||||
}
|
||||
|
||||
TEST(FindPackageServerWithMultipleServers) {
|
||||
std::vector<std::string> servers = {"server1.com", "server2.com", "server3.com"};
|
||||
GetbinClient client(servers);
|
||||
|
||||
std::string foundServer;
|
||||
bool found = client.findPackageServer("nonexistent-tool", "x86_64", foundServer);
|
||||
|
||||
// Should not find nonexistent package, but should not crash
|
||||
ASSERT_FALSE(found);
|
||||
ASSERT_TRUE(foundServer.empty());
|
||||
}
|
||||
|
||||
TEST(MultiServerDownloadFallback) {
|
||||
auto tempDir = GetbinClientTestHelper::createTempDir();
|
||||
auto outputPath = tempDir / "test-download";
|
||||
|
||||
std::vector<std::string> servers = {"invalid-server1.com", "invalid-server2.com", "invalid-server3.com"};
|
||||
GetbinClient client(servers);
|
||||
|
||||
// Should try all servers and fail gracefully
|
||||
bool result = client.download("nonexistent-tool", "x86_64", outputPath.string());
|
||||
ASSERT_FALSE(result);
|
||||
|
||||
// Should have error message
|
||||
ASSERT_NOT_EMPTY(client.getLastError());
|
||||
|
||||
GetbinClientTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(ServerSpecificDownload) {
|
||||
auto tempDir = GetbinClientTestHelper::createTempDir();
|
||||
auto outputPath = tempDir / "test-download";
|
||||
|
||||
GetbinClient client;
|
||||
|
||||
// Test server-specific download with invalid server
|
||||
NetworkError error = client.downloadFromServer("invalid-server.com", "test-tool", "x86_64",
|
||||
outputPath.string());
|
||||
|
||||
// Should return appropriate network error
|
||||
ASSERT_TRUE(error != NetworkError::None);
|
||||
ASSERT_NOT_EMPTY(client.getNetworkErrorMessage(error));
|
||||
|
||||
GetbinClientTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(ProgressCallbackHandling) {
|
||||
auto tempDir = GetbinClientTestHelper::createTempDir();
|
||||
auto outputPath = tempDir / "test-download";
|
||||
|
||||
GetbinClient client;
|
||||
|
||||
// Test with progress callback that continues
|
||||
bool result1 = client.download("test-tool", "x86_64", outputPath.string(),
|
||||
GetbinClientTestHelper::mockProgressCallback);
|
||||
ASSERT_FALSE(result1); // Will fail due to network, but should handle callback
|
||||
|
||||
// Test with progress callback that cancels
|
||||
bool result2 = client.download("test-tool", "x86_64", outputPath.string(),
|
||||
GetbinClientTestHelper::cancelProgressCallback);
|
||||
ASSERT_FALSE(result2); // Should handle cancellation
|
||||
|
||||
GetbinClientTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(HashRetrievalMultiServer) {
|
||||
std::vector<std::string> servers = {"server1.com", "server2.com"};
|
||||
GetbinClient client(servers);
|
||||
|
||||
std::string hash;
|
||||
|
||||
// Test multi-server hash retrieval
|
||||
bool result = client.getHash("test-tool", "x86_64", hash);
|
||||
ASSERT_FALSE(result); // Will fail due to network
|
||||
ASSERT_TRUE(hash.empty());
|
||||
|
||||
// Test server-specific hash retrieval
|
||||
bool result2 = client.getHash("server1.com", "test-tool", "x86_64", hash);
|
||||
ASSERT_FALSE(result2); // Will fail due to network
|
||||
ASSERT_TRUE(hash.empty());
|
||||
}
|
||||
|
||||
TEST(UploadFunctionality) {
|
||||
auto tempDir = GetbinClientTestHelper::createTempDir();
|
||||
auto testFile = tempDir / "test-archive.tar.gz";
|
||||
|
||||
// Create a test file
|
||||
GetbinClientTestHelper::createTestFile(testFile, "test archive content");
|
||||
|
||||
GetbinClient client;
|
||||
std::string outUrl, outHash;
|
||||
|
||||
// Test server-specific upload
|
||||
bool result1 = client.upload("test-server.com", testFile.string(), outUrl, outHash, "test-token");
|
||||
ASSERT_FALSE(result1); // Will fail due to network
|
||||
|
||||
// Test backward compatibility upload
|
||||
bool result2 = client.upload(testFile.string(), outUrl, outHash, "test-token");
|
||||
ASSERT_FALSE(result2); // Will fail due to network
|
||||
|
||||
GetbinClientTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(LegacyMethodsCompatibility) {
|
||||
GetbinClient client;
|
||||
|
||||
// Test legacy delete method
|
||||
bool deleteResult = client.deleteObject("test-hash", "test-token");
|
||||
ASSERT_FALSE(deleteResult); // Will fail due to network
|
||||
|
||||
// Test legacy list packages method
|
||||
std::vector<std::string> packages;
|
||||
bool listResult = client.listPackages(packages);
|
||||
ASSERT_FALSE(listResult); // Will fail due to network
|
||||
ASSERT_TRUE(packages.empty());
|
||||
|
||||
// Test legacy list all entries method
|
||||
std::vector<std::pair<std::string, std::vector<std::string>>> entries;
|
||||
bool entriesResult = client.listAllEntries(entries);
|
||||
ASSERT_FALSE(entriesResult); // Will fail due to network
|
||||
ASSERT_TRUE(entries.empty());
|
||||
}
|
||||
|
||||
TEST(ErrorMessagePersistence) {
|
||||
GetbinClient client;
|
||||
|
||||
// Trigger an error
|
||||
std::string hash;
|
||||
client.getHash("invalid-server.com", "test-tool", "x86_64", hash);
|
||||
|
||||
// Error message should be set
|
||||
std::string error1 = client.getLastError();
|
||||
ASSERT_NOT_EMPTY(error1);
|
||||
|
||||
// Trigger another error
|
||||
client.download("test-tool", "x86_64", "/invalid/path");
|
||||
|
||||
// Error message should be updated
|
||||
std::string error2 = client.getLastError();
|
||||
ASSERT_NOT_EMPTY(error2);
|
||||
|
||||
// Errors might be different depending on which fails first
|
||||
// But both should be non-empty
|
||||
}
|
||||
|
||||
TEST(UserAgentGeneration) {
|
||||
GetbinClient client;
|
||||
|
||||
// Test that user agent is properly set (indirect test through network calls)
|
||||
std::string hash;
|
||||
bool result = client.getHash("test-server.com", "test-tool", "x86_64", hash);
|
||||
|
||||
// Should fail due to network but not crash due to user agent issues
|
||||
ASSERT_FALSE(result);
|
||||
ASSERT_NOT_EMPTY(client.getLastError());
|
||||
}
|
||||
|
||||
TEST(ConcurrentOperations) {
|
||||
std::vector<std::string> servers = {"server1.com", "server2.com"};
|
||||
GetbinClient client(servers);
|
||||
|
||||
// Test that multiple operations can be performed without interference
|
||||
std::string hash1, hash2;
|
||||
|
||||
bool result1 = client.getHash("tool1", "x86_64", hash1);
|
||||
bool result2 = client.getHash("tool2", "aarch64", hash2);
|
||||
|
||||
// Both should fail gracefully without interfering with each other
|
||||
ASSERT_FALSE(result1);
|
||||
ASSERT_FALSE(result2);
|
||||
ASSERT_TRUE(hash1.empty());
|
||||
ASSERT_TRUE(hash2.empty());
|
||||
}
|
||||
|
||||
TEST(ArchitectureHandling) {
|
||||
GetbinClient client;
|
||||
|
||||
std::string hash;
|
||||
|
||||
// Test different architecture strings
|
||||
bool result1 = client.getHash("test-tool", "x86_64", hash);
|
||||
bool result2 = client.getHash("test-tool", "aarch64", hash);
|
||||
bool result3 = client.getHash("test-tool", "universal", hash);
|
||||
bool result4 = client.getHash("test-tool", "invalid-arch", hash);
|
||||
|
||||
// All should fail due to network but handle architectures properly
|
||||
ASSERT_FALSE(result1);
|
||||
ASSERT_FALSE(result2);
|
||||
ASSERT_FALSE(result3);
|
||||
ASSERT_FALSE(result4);
|
||||
}
|
||||
|
||||
// Registration function
|
||||
void registerGetbinClientTests() {
|
||||
register_GetbinClient_DefaultConstructor();
|
||||
register_GetbinClient_MultiServerConstructor();
|
||||
register_GetbinClient_EmptyServerList();
|
||||
register_GetbinClient_NetworkErrorClassification();
|
||||
register_GetbinClient_UrlBuilding();
|
||||
register_GetbinClient_FindPackageServerWithMultipleServers();
|
||||
register_GetbinClient_MultiServerDownloadFallback();
|
||||
register_GetbinClient_ServerSpecificDownload();
|
||||
register_GetbinClient_ProgressCallbackHandling();
|
||||
register_GetbinClient_HashRetrievalMultiServer();
|
||||
register_GetbinClient_UploadFunctionality();
|
||||
register_GetbinClient_LegacyMethodsCompatibility();
|
||||
register_GetbinClient_ErrorMessagePersistence();
|
||||
register_GetbinClient_UserAgentGeneration();
|
||||
register_GetbinClient_ConcurrentOperations();
|
||||
register_GetbinClient_ArchitectureHandling();
|
||||
}
|
530
getpkg/test/test_integration.cpp
Normal file
530
getpkg/test/test_integration.cpp
Normal file
@ -0,0 +1,530 @@
|
||||
#include "ServerManager.hpp"
|
||||
#include "GetbinClient.hpp"
|
||||
#include "PackageMetadata.hpp"
|
||||
#include "MigrationManager.hpp"
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
// Test framework declarations from test_main.cpp
|
||||
class TestRunner {
|
||||
public:
|
||||
static TestRunner& instance();
|
||||
void addTest(const std::string& name, std::function<void()> test);
|
||||
};
|
||||
|
||||
#define ASSERT_TRUE(condition) \
|
||||
if (!(condition)) { \
|
||||
throw std::runtime_error("Assertion failed: " #condition); \
|
||||
}
|
||||
|
||||
#define ASSERT_FALSE(condition) \
|
||||
if (condition) { \
|
||||
throw std::runtime_error("Assertion failed: " #condition " should be false"); \
|
||||
}
|
||||
|
||||
#define ASSERT_EQ(expected, actual) \
|
||||
if ((expected) != (actual)) { \
|
||||
throw std::runtime_error("Assertion failed: expected != actual"); \
|
||||
}
|
||||
|
||||
#define ASSERT_STR_EQ(expected, actual) \
|
||||
if ((expected) != (actual)) { \
|
||||
throw std::runtime_error("Assertion failed: expected '" + std::string(expected) + "' but got '" + std::string(actual) + "'"); \
|
||||
}
|
||||
|
||||
#define ASSERT_NOT_EMPTY(str) \
|
||||
if ((str).empty()) { \
|
||||
throw std::runtime_error("Assertion failed: string should not be empty"); \
|
||||
}
|
||||
|
||||
#define ASSERT_GE(actual, expected) \
|
||||
if ((actual) < (expected)) { \
|
||||
throw std::runtime_error("Assertion failed: expected " + std::to_string(actual) + " >= " + std::to_string(expected)); \
|
||||
}
|
||||
|
||||
#define TEST(name) \
|
||||
void test_Integration_##name(); \
|
||||
void register_Integration_##name() { \
|
||||
TestRunner::instance().addTest("Integration::" #name, test_Integration_##name); \
|
||||
} \
|
||||
void test_Integration_##name()
|
||||
|
||||
// Test helper class for Integration testing
|
||||
class IntegrationTestHelper {
|
||||
public:
|
||||
static std::filesystem::path createTempDir() {
|
||||
auto tempDir = std::filesystem::temp_directory_path() / "getpkg_integration_test" / std::to_string(std::time(nullptr));
|
||||
std::filesystem::create_directories(tempDir);
|
||||
return tempDir;
|
||||
}
|
||||
|
||||
static void cleanupTempDir(const std::filesystem::path& dir) {
|
||||
if (std::filesystem::exists(dir)) {
|
||||
std::filesystem::remove_all(dir);
|
||||
}
|
||||
}
|
||||
|
||||
static void setupLegacyEnvironment(const std::filesystem::path& configDir) {
|
||||
// Create legacy token file
|
||||
auto legacyDir = configDir / "getpkg.xyz";
|
||||
std::filesystem::create_directories(legacyDir);
|
||||
|
||||
std::ofstream tokenFile(legacyDir / "write_token.txt");
|
||||
tokenFile << "legacy-integration-token";
|
||||
tokenFile.close();
|
||||
|
||||
// Create legacy package files
|
||||
nlohmann::json package1 = {
|
||||
{"name", "integration-tool1"},
|
||||
{"version", "2023.1201.1000"},
|
||||
{"hash", "legacy123hash456"},
|
||||
{"arch", "x86_64"}
|
||||
};
|
||||
|
||||
nlohmann::json package2 = {
|
||||
{"name", "integration-tool2"},
|
||||
{"version", "2023.1202.1100"},
|
||||
{"hash", "legacy789hash012"},
|
||||
{"arch", "aarch64"}
|
||||
};
|
||||
|
||||
std::ofstream package1File(configDir / "integration-tool1.json");
|
||||
package1File << package1.dump(2);
|
||||
package1File.close();
|
||||
|
||||
std::ofstream package2File(configDir / "integration-tool2.json");
|
||||
package2File << package2.dump(2);
|
||||
package2File.close();
|
||||
}
|
||||
|
||||
static void verifyNewFormatStructure(const std::filesystem::path& configDir) {
|
||||
ASSERT_TRUE(std::filesystem::exists(configDir / "servers.json"));
|
||||
ASSERT_TRUE(std::filesystem::exists(configDir / "packages"));
|
||||
ASSERT_TRUE(std::filesystem::is_directory(configDir / "packages"));
|
||||
}
|
||||
|
||||
static void setEnvironmentHome(const std::filesystem::path& homeDir) {
|
||||
setenv("HOME", homeDir.c_str(), 1);
|
||||
}
|
||||
};
|
||||
|
||||
TEST(CompleteWorkflowFromLegacyToMultiServer) {
|
||||
auto tempDir = IntegrationTestHelper::createTempDir();
|
||||
auto configDir = tempDir / ".config" / "getpkg";
|
||||
std::filesystem::create_directories(configDir);
|
||||
|
||||
// Set up legacy environment
|
||||
IntegrationTestHelper::setupLegacyEnvironment(configDir);
|
||||
IntegrationTestHelper::setEnvironmentHome(tempDir);
|
||||
|
||||
// Step 1: Migration Manager detects need for migration
|
||||
MigrationManager migrationManager(configDir);
|
||||
ASSERT_TRUE(migrationManager.needsMigration());
|
||||
|
||||
// Step 2: Perform migration
|
||||
bool migrationResult = migrationManager.performMigration();
|
||||
ASSERT_TRUE(migrationResult);
|
||||
|
||||
// Verify migration results
|
||||
auto result = migrationManager.getLastMigrationResult();
|
||||
ASSERT_TRUE(result.success);
|
||||
ASSERT_EQ(2, result.migratedPackages);
|
||||
ASSERT_TRUE(result.serverConfigMigrated);
|
||||
|
||||
// Step 3: ServerManager loads new configuration
|
||||
ServerManager serverManager;
|
||||
auto loadResult = serverManager.loadConfiguration();
|
||||
ASSERT_EQ(ServerManagerError::None, loadResult);
|
||||
|
||||
auto servers = serverManager.getServers();
|
||||
ASSERT_EQ(1, servers.size());
|
||||
ASSERT_STR_EQ("getpkg.xyz", servers[0]);
|
||||
ASSERT_TRUE(serverManager.hasWriteToken("getpkg.xyz"));
|
||||
|
||||
// Step 4: Add additional servers
|
||||
auto addResult = serverManager.addServer("packages.example.com");
|
||||
ASSERT_EQ(ServerManagerError::None, addResult);
|
||||
|
||||
serverManager.setWriteToken("packages.example.com", "example-token");
|
||||
|
||||
// Step 5: GetbinClient uses multi-server configuration
|
||||
auto updatedServers = serverManager.getServers();
|
||||
GetbinClient client(updatedServers);
|
||||
|
||||
// Test multi-server operations (will fail due to network but should not crash)
|
||||
std::string hash;
|
||||
bool hashResult = client.getHash("test-tool", "x86_64", hash);
|
||||
ASSERT_FALSE(hashResult); // Expected to fail due to network
|
||||
ASSERT_NOT_EMPTY(client.getLastError());
|
||||
|
||||
// Step 6: PackageMetadataManager works with migrated data
|
||||
PackageMetadataManager packageManager(configDir);
|
||||
|
||||
auto installedPackages = packageManager.listInstalledPackages();
|
||||
ASSERT_EQ(2, installedPackages.size());
|
||||
|
||||
// Verify migrated packages have server information
|
||||
PackageMetadata tool1 = packageManager.loadPackageMetadata("integration-tool1");
|
||||
ASSERT_TRUE(tool1.isValid());
|
||||
ASSERT_STR_EQ("getpkg.xyz", tool1.sourceServer);
|
||||
ASSERT_NOT_EMPTY(tool1.installDate);
|
||||
|
||||
// Step 7: Save new package with multi-server metadata
|
||||
PackageMetadata newPackage("new-tool", "2024.0115.1430", "new123hash456", "x86_64", "packages.example.com");
|
||||
bool saveResult = packageManager.savePackageMetadata(newPackage);
|
||||
ASSERT_TRUE(saveResult);
|
||||
|
||||
// Verify new package is tracked
|
||||
auto updatedPackages = packageManager.listInstalledPackages();
|
||||
ASSERT_EQ(3, updatedPackages.size());
|
||||
|
||||
IntegrationTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(ServerManagerAndGetbinClientIntegration) {
|
||||
auto tempDir = IntegrationTestHelper::createTempDir();
|
||||
auto configDir = tempDir / ".config" / "getpkg";
|
||||
std::filesystem::create_directories(configDir);
|
||||
IntegrationTestHelper::setEnvironmentHome(tempDir);
|
||||
|
||||
// Initialize ServerManager
|
||||
ServerManager serverManager;
|
||||
serverManager.ensureDefaultConfiguration();
|
||||
|
||||
// Add multiple servers
|
||||
serverManager.addServer("server1.example.com");
|
||||
serverManager.addServer("server2.example.com");
|
||||
serverManager.setWriteToken("server1.example.com", "token1");
|
||||
serverManager.setWriteToken("server2.example.com", "token2");
|
||||
|
||||
// Get server list for client
|
||||
auto servers = serverManager.getServers();
|
||||
ASSERT_EQ(3, servers.size()); // default + 2 added
|
||||
|
||||
// Initialize GetbinClient with server list
|
||||
GetbinClient client(servers);
|
||||
|
||||
// Test server-specific operations
|
||||
std::string foundServer;
|
||||
bool findResult = client.findPackageServer("test-tool", "x86_64", foundServer);
|
||||
ASSERT_FALSE(findResult); // Will fail due to network
|
||||
|
||||
// Test fallback behavior
|
||||
std::string hash;
|
||||
bool hashResult = client.getHash("test-tool", "x86_64", hash);
|
||||
ASSERT_FALSE(hashResult); // Will fail but should try all servers
|
||||
|
||||
// Verify error handling
|
||||
ASSERT_NOT_EMPTY(client.getLastError());
|
||||
|
||||
IntegrationTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(PackageMetadataAndMigrationIntegration) {
|
||||
auto tempDir = IntegrationTestHelper::createTempDir();
|
||||
auto configDir = tempDir / ".config" / "getpkg";
|
||||
std::filesystem::create_directories(configDir);
|
||||
|
||||
// Create legacy package files
|
||||
nlohmann::json legacyPackage1 = {
|
||||
{"name", "metadata-tool1"},
|
||||
{"version", "1.0"},
|
||||
{"hash", "hash1"},
|
||||
{"arch", "x86_64"}
|
||||
};
|
||||
|
||||
nlohmann::json legacyPackage2 = {
|
||||
{"name", "metadata-tool2"},
|
||||
{"version", "2.0"},
|
||||
{"hash", "hash2"},
|
||||
{"arch", "aarch64"}
|
||||
};
|
||||
|
||||
std::ofstream file1(configDir / "metadata-tool1.json");
|
||||
file1 << legacyPackage1.dump(2);
|
||||
file1.close();
|
||||
|
||||
std::ofstream file2(configDir / "metadata-tool2.json");
|
||||
file2 << legacyPackage2.dump(2);
|
||||
file2.close();
|
||||
|
||||
// Use MigrationManager to migrate
|
||||
MigrationManager migrationManager(configDir);
|
||||
ASSERT_TRUE(migrationManager.needsMigration());
|
||||
|
||||
bool migrationResult = migrationManager.performMigration();
|
||||
ASSERT_TRUE(migrationResult);
|
||||
|
||||
// Use PackageMetadataManager to verify migration
|
||||
PackageMetadataManager packageManager(configDir);
|
||||
|
||||
auto packages = packageManager.listInstalledPackages();
|
||||
ASSERT_EQ(2, packages.size());
|
||||
|
||||
// Verify migrated metadata
|
||||
PackageMetadata tool1 = packageManager.loadPackageMetadata("metadata-tool1");
|
||||
ASSERT_TRUE(tool1.isValid());
|
||||
ASSERT_STR_EQ("metadata-tool1", tool1.name);
|
||||
ASSERT_STR_EQ("1.0", tool1.version);
|
||||
ASSERT_STR_EQ("getpkg.xyz", tool1.sourceServer); // Should be set during migration
|
||||
ASSERT_NOT_EMPTY(tool1.installDate);
|
||||
|
||||
PackageMetadata tool2 = packageManager.loadPackageMetadata("metadata-tool2");
|
||||
ASSERT_TRUE(tool2.isValid());
|
||||
ASSERT_STR_EQ("aarch64", tool2.arch);
|
||||
ASSERT_STR_EQ("getpkg.xyz", tool2.sourceServer);
|
||||
|
||||
IntegrationTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(EndToEndPackageInstallationWorkflow) {
|
||||
auto tempDir = IntegrationTestHelper::createTempDir();
|
||||
auto configDir = tempDir / ".config" / "getpkg";
|
||||
std::filesystem::create_directories(configDir);
|
||||
IntegrationTestHelper::setEnvironmentHome(tempDir);
|
||||
|
||||
// Step 1: Initialize ServerManager with multiple servers
|
||||
ServerManager serverManager;
|
||||
serverManager.ensureDefaultConfiguration();
|
||||
serverManager.addServer("primary.packages.com");
|
||||
serverManager.addServer("backup.packages.com");
|
||||
|
||||
// Step 2: Initialize GetbinClient with server list
|
||||
auto servers = serverManager.getServers();
|
||||
GetbinClient client(servers);
|
||||
|
||||
// Step 3: Initialize PackageMetadataManager
|
||||
PackageMetadataManager packageManager(configDir);
|
||||
packageManager.ensurePackagesDirectory();
|
||||
|
||||
// Step 4: Simulate package installation workflow
|
||||
std::string toolName = "workflow-test-tool";
|
||||
std::string version = "2024.0115.1430";
|
||||
std::string hash = "workflow123hash456";
|
||||
std::string arch = "x86_64";
|
||||
std::string sourceServer = "primary.packages.com";
|
||||
|
||||
// Try to download (will fail due to network but tests the workflow)
|
||||
auto downloadPath = tempDir / "downloads" / (toolName + ".tar.gz");
|
||||
std::filesystem::create_directories(downloadPath.parent_path());
|
||||
|
||||
bool downloadResult = client.download(toolName, arch, downloadPath.string());
|
||||
ASSERT_FALSE(downloadResult); // Expected to fail due to network
|
||||
|
||||
// Simulate successful installation by creating metadata
|
||||
PackageMetadata metadata(toolName, version, hash, arch, sourceServer);
|
||||
bool saveResult = packageManager.savePackageMetadata(metadata);
|
||||
ASSERT_TRUE(saveResult);
|
||||
|
||||
// Step 5: Verify package is tracked
|
||||
ASSERT_TRUE(packageManager.packageExists(toolName));
|
||||
|
||||
PackageMetadata savedMetadata = packageManager.loadPackageMetadata(toolName);
|
||||
ASSERT_TRUE(savedMetadata.isValid());
|
||||
ASSERT_STR_EQ(sourceServer, savedMetadata.sourceServer);
|
||||
|
||||
// Step 6: Simulate update check
|
||||
bool needsUpdate = savedMetadata.needsUpdate("different-hash");
|
||||
ASSERT_TRUE(needsUpdate);
|
||||
|
||||
bool noUpdateNeeded = savedMetadata.needsUpdate(hash);
|
||||
ASSERT_FALSE(noUpdateNeeded);
|
||||
|
||||
IntegrationTestHelper::cleanupTempDir(tempDir);
|
||||
}
|
||||
|
||||
TEST(MultiServerPublishingWorkflow) {
    // Sandbox the config tree so the test never touches the real user setup.
    auto sandbox = IntegrationTestHelper::createTempDir();
    auto cfgDir = sandbox / ".config" / "getpkg";
    std::filesystem::create_directories(cfgDir);
    IntegrationTestHelper::setEnvironmentHome(sandbox);

    // Register two extra publish targets on top of the default registry and
    // give every server a write token.
    ServerManager mgr;
    mgr.ensureDefaultConfiguration();
    mgr.addServer("publish1.example.com");
    mgr.addServer("publish2.example.com");

    mgr.setWriteToken("getpkg.xyz", "default-token");
    mgr.setWriteToken("publish1.example.com", "publish1-token");
    mgr.setWriteToken("publish2.example.com", "publish2-token");

    // The default publish target is the first server holding a token.
    std::string defaultPublishServer = mgr.getDefaultPublishServer();
    ASSERT_STR_EQ("getpkg.xyz", defaultPublishServer);

    // All three servers were given tokens above.
    auto tokenServers = mgr.getServersWithTokens();
    ASSERT_EQ(3, tokenServers.size());

    // Client configured with the full server list.
    GetbinClient client(mgr.getServers());

    // Minimal stand-in archive to publish.
    auto archivePath = sandbox / "test-package.tar.gz";
    {
        std::ofstream out(archivePath);
        out << "test archive content";
    }

    // No network is available in the test environment, so both the
    // server-targeted upload and the default upload are expected to fail.
    std::string outUrl, outHash;
    ASSERT_FALSE(client.upload("publish1.example.com", archivePath.string(),
                               outUrl, outHash, "publish1-token"));
    ASSERT_FALSE(client.upload(archivePath.string(), outUrl, outHash, "default-token"));

    // A failed upload must leave a diagnostic message behind.
    ASSERT_NOT_EMPTY(client.getLastError());

    IntegrationTestHelper::cleanupTempDir(sandbox);
}
|
||||
|
||||
TEST(ErrorHandlingAndRecoveryWorkflow) {
    auto tempDir = IntegrationTestHelper::createTempDir();
    auto configDir = tempDir / ".config" / "getpkg";
    std::filesystem::create_directories(configDir);
    // FIX: point HOME at the sandbox, matching the other integration tests.
    // Without this, the default-constructed ServerManager below resolves the
    // real user's config directory, so the corrupted servers.json written
    // here is never read and the recovery path is not actually exercised.
    IntegrationTestHelper::setEnvironmentHome(tempDir);

    // Write a syntactically invalid server configuration.
    {
        std::ofstream corruptedConfig(configDir / "servers.json");
        corruptedConfig << "{ invalid json";
    }

    // ServerManager should detect the corruption and fall back to defaults.
    ServerManager serverManager;
    auto loadResult = serverManager.loadConfiguration();
    ASSERT_EQ(ServerManagerError::None, loadResult); // Should recover

    auto servers = serverManager.getServers();
    ASSERT_EQ(1, servers.size()); // Should have default server
    ASSERT_STR_EQ("getpkg.xyz", servers[0]);

    // Corrupted per-package metadata must fail gracefully, not crash.
    std::filesystem::create_directories(configDir / "packages");
    {
        std::ofstream corruptedPackage(configDir / "packages" / "corrupted-tool.json");
        corruptedPackage << "{ invalid json";
    }

    PackageMetadataManager packageManager(configDir);

    PackageMetadata corruptedMetadata = packageManager.loadPackageMetadata("corrupted-tool");
    ASSERT_FALSE(corruptedMetadata.isValid()); // Should fail gracefully

    // Cleanup removes exactly the one broken record.
    int cleanedCount = packageManager.cleanupInvalidMetadata();
    ASSERT_EQ(1, cleanedCount);
    ASSERT_FALSE(packageManager.packageExists("corrupted-tool"));

    IntegrationTestHelper::cleanupTempDir(tempDir);
}
|
||||
|
||||
TEST(BackwardCompatibilityWorkflow) {
    auto sandbox = IntegrationTestHelper::createTempDir();
    auto cfgDir = sandbox / ".config" / "getpkg";
    std::filesystem::create_directories(cfgDir);
    IntegrationTestHelper::setEnvironmentHome(sandbox);

    // With no explicit configuration, the legacy single-server behaviour
    // must apply: exactly one server, the official registry.
    ServerManager mgr;
    mgr.ensureDefaultConfiguration();

    auto servers = mgr.getServers();
    ASSERT_EQ(1, servers.size());
    ASSERT_STR_EQ("getpkg.xyz", servers[0]);

    // Both client construction paths must behave identically offline: the
    // default-constructed client and the explicit multi-server client each
    // fail the (networkless) hash lookup without crashing.
    std::string digest;
    GetbinClient defaultClient;
    ASSERT_FALSE(defaultClient.getHash("test-tool", "x86_64", digest));

    GetbinClient multiClient(servers);
    ASSERT_FALSE(multiClient.getHash("test-tool", "x86_64", digest));

    // Metadata storage works against the default directory layout.
    PackageMetadataManager pkgs(cfgDir);
    ASSERT_TRUE(pkgs.ensurePackagesDirectory());

    // A record saved through the new manager round-trips intact.
    PackageMetadata record("compat-tool", "1.0", "hash", "x86_64", "getpkg.xyz");
    ASSERT_TRUE(pkgs.savePackageMetadata(record));

    PackageMetadata loaded = pkgs.loadPackageMetadata("compat-tool");
    ASSERT_TRUE(loaded.isValid());
    ASSERT_STR_EQ("compat-tool", loaded.name);

    IntegrationTestHelper::cleanupTempDir(sandbox);
}
|
||||
|
||||
TEST(ConcurrentOperationsWorkflow) {
    auto sandbox = IntegrationTestHelper::createTempDir();
    auto cfgDir = sandbox / ".config" / "getpkg";
    std::filesystem::create_directories(cfgDir);
    IntegrationTestHelper::setEnvironmentHome(sandbox);

    // All components under test share one config tree.
    ServerManager mgr;
    mgr.ensureDefaultConfiguration();
    mgr.addServer("concurrent1.example.com");
    mgr.addServer("concurrent2.example.com");

    GetbinClient client(mgr.getServers());

    PackageMetadataManager pkgs(cfgDir);
    pkgs.ensurePackagesDirectory();

    // NOTE: calls below are issued sequentially; this only *simulates*
    // concurrent use of the managers.
    const std::vector<std::string> tools{"tool1", "tool2", "tool3", "tool4", "tool5"};

    // Persist one metadata record per tool.
    for (const auto& tool : tools) {
        PackageMetadata meta(tool, "1.0", "hash-" + tool, "x86_64", "concurrent1.example.com");
        ASSERT_TRUE(pkgs.savePackageMetadata(meta));
    }

    // Every record should now be listed as installed.
    ASSERT_EQ(5, pkgs.listInstalledPackages().size());

    // Hash lookups hit the network and are expected to fail offline, but
    // must not disturb the saved metadata.
    for (const auto& tool : tools) {
        std::string digest;
        ASSERT_FALSE(client.getHash(tool, "x86_64", digest));
    }

    // Each package must still round-trip intact after the interleaved calls.
    for (const auto& tool : tools) {
        PackageMetadata meta = pkgs.loadPackageMetadata(tool);
        ASSERT_TRUE(meta.isValid());
        ASSERT_STR_EQ(tool, meta.name);
    }

    IntegrationTestHelper::cleanupTempDir(sandbox);
}
|
||||
|
||||
// Registration function
|
||||
void registerIntegrationTests() {
|
||||
register_Integration_CompleteWorkflowFromLegacyToMultiServer();
|
||||
register_Integration_ServerManagerAndGetbinClientIntegration();
|
||||
register_Integration_PackageMetadataAndMigrationIntegration();
|
||||
register_Integration_EndToEndPackageInstallationWorkflow();
|
||||
register_Integration_MultiServerPublishingWorkflow();
|
||||
register_Integration_ErrorHandlingAndRecoveryWorkflow();
|
||||
register_Integration_BackwardCompatibilityWorkflow();
|
||||
register_Integration_ConcurrentOperationsWorkflow();
|
||||
}
|
18
getpkg/test/test_main.cpp
Normal file
18
getpkg/test/test_main.cpp
Normal file
@ -0,0 +1,18 @@
|
||||
#include "test_framework.hpp"

// Entry point for the unit-test binary. Individual tests self-register via
// static constructors in each translation unit; runAllTests() executes them
// and its result (0 on success) becomes the process exit code.
int main() {
    std::cout << "=== getpkg Multi-Server Support Unit Tests ===" << std::endl;
    std::cout << "Testing all components for multi-server functionality" << std::endl;
    std::cout << std::endl;

    const int result = TestRunner::instance().runAllTests();

    std::cout << std::endl
              << (result == 0
                      ? "🎉 All tests passed! Multi-server support is working correctly."
                      : "❌ Some tests failed. Please review the failures above.")
              << std::endl;

    return result;
}
|
545
getpkg/test/test_migration_manager.cpp
Normal file
545
getpkg/test/test_migration_manager.cpp
Normal file
@ -0,0 +1,545 @@
|
||||
#include "MigrationManager.hpp"
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
// Test framework declarations from test_main.cpp
class TestRunner {
public:
    static TestRunner& instance();
    void addTest(const std::string& name, std::function<void()> test);
};

// Assertion helpers. Each macro body is wrapped in do { ... } while (0) so a
// macro invocation behaves as a single statement: it is safe inside an
// unbraced if/else (no dangling-else) and the trailing semicolon parses as
// expected. The condition is fully parenthesized to protect operator
// precedence in expressions like ASSERT_FALSE(a == b).
#define ASSERT_TRUE(condition) \
    do { \
        if (!(condition)) { \
            throw std::runtime_error("Assertion failed: " #condition); \
        } \
    } while (0)

#define ASSERT_FALSE(condition) \
    do { \
        if ((condition)) { \
            throw std::runtime_error("Assertion failed: " #condition " should be false"); \
        } \
    } while (0)

#define ASSERT_EQ(expected, actual) \
    do { \
        if ((expected) != (actual)) { \
            throw std::runtime_error("Assertion failed: expected != actual"); \
        } \
    } while (0)

#define ASSERT_STR_EQ(expected, actual) \
    do { \
        if ((expected) != (actual)) { \
            throw std::runtime_error("Assertion failed: expected '" + std::string(expected) + "' but got '" + std::string(actual) + "'"); \
        } \
    } while (0)

#define ASSERT_NOT_EMPTY(str) \
    do { \
        if ((str).empty()) { \
            throw std::runtime_error("Assertion failed: string should not be empty"); \
        } \
    } while (0)

#define ASSERT_GE(actual, expected) \
    do { \
        if ((actual) < (expected)) { \
            throw std::runtime_error("Assertion failed: expected " + std::to_string(actual) + " >= " + std::to_string(expected)); \
        } \
    } while (0)

// Declares test_MigrationManager_<name> and a register_ function that adds
// it to the shared TestRunner under "MigrationManager::<name>".
#define TEST(name) \
    void test_MigrationManager_##name(); \
    void register_MigrationManager_##name() { \
        TestRunner::instance().addTest("MigrationManager::" #name, test_MigrationManager_##name); \
    } \
    void test_MigrationManager_##name()
|
||||
|
||||
// Test helper class for MigrationManager testing
|
||||
class MigrationManagerTestHelper {
|
||||
public:
|
||||
static std::filesystem::path createTempDir() {
|
||||
auto tempDir = std::filesystem::temp_directory_path() / "getpkg_migration_test" / std::to_string(std::time(nullptr));
|
||||
std::filesystem::create_directories(tempDir);
|
||||
return tempDir;
|
||||
}
|
||||
|
||||
static void cleanupTempDir(const std::filesystem::path& dir) {
|
||||
if (std::filesystem::exists(dir)) {
|
||||
std::filesystem::remove_all(dir);
|
||||
}
|
||||
}
|
||||
|
||||
static void createLegacyTokenFile(const std::filesystem::path& configDir, const std::string& token) {
|
||||
auto legacyDir = configDir / "getpkg.xyz";
|
||||
std::filesystem::create_directories(legacyDir);
|
||||
|
||||
std::ofstream tokenFile(legacyDir / "write_token.txt");
|
||||
tokenFile << token;
|
||||
tokenFile.close();
|
||||
}
|
||||
|
||||
static void createLegacyPackageFile(const std::filesystem::path& configDir, const std::string& toolName, const nlohmann::json& content) {
|
||||
std::ofstream packageFile(configDir / (toolName + ".json"));
|
||||
packageFile << content.dump(2);
|
||||
packageFile.close();
|
||||
}
|
||||
|
||||
static void createNewFormatConfig(const std::filesystem::path& configDir) {
|
||||
nlohmann::json config = {
|
||||
{"version", "1.0"},
|
||||
{"servers", {
|
||||
{
|
||||
{"url", "getpkg.xyz"},
|
||||
{"name", "Official getpkg Registry"},
|
||||
{"default", true},
|
||||
{"writeToken", ""},
|
||||
{"added", "2024-01-15T10:30:00Z"}
|
||||
}
|
||||
}},
|
||||
{"lastUpdated", "2024-01-15T10:30:00Z"}
|
||||
};
|
||||
|
||||
std::ofstream configFile(configDir / "servers.json");
|
||||
configFile << config.dump(2);
|
||||
configFile.close();
|
||||
}
|
||||
|
||||
static nlohmann::json createLegacyPackageJson(const std::string& name, const std::string& version, const std::string& hash, const std::string& arch) {
|
||||
return nlohmann::json{
|
||||
{"name", name},
|
||||
{"version", version},
|
||||
{"hash", hash},
|
||||
{"arch", arch}
|
||||
};
|
||||
}
|
||||
|
||||
static void createPackagesDirectory(const std::filesystem::path& configDir) {
|
||||
std::filesystem::create_directories(configDir / "packages");
|
||||
}
|
||||
|
||||
static bool fileExists(const std::filesystem::path& path) {
|
||||
return std::filesystem::exists(path);
|
||||
}
|
||||
|
||||
static int countFilesInDirectory(const std::filesystem::path& dir, const std::string& extension = "") {
|
||||
if (!std::filesystem::exists(dir)) return 0;
|
||||
|
||||
int count = 0;
|
||||
for (const auto& entry : std::filesystem::directory_iterator(dir)) {
|
||||
if (entry.is_regular_file()) {
|
||||
if (extension.empty() || entry.path().extension() == extension) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
};
|
||||
|
||||
TEST(DefaultConstructor) {
    // Smoke test: default construction must not throw. Deeper behaviour
    // depends on the host environment, so nothing else is asserted.
    MigrationManager manager;
    (void)manager;
}
|
||||
|
||||
TEST(CustomConstructor) {
    using H = MigrationManagerTestHelper;
    // Constructing against an explicit config directory must not throw.
    auto dir = H::createTempDir();
    MigrationManager mm(dir);
    (void)mm;
    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(NeedsMigrationNoLegacyData) {
    using H = MigrationManagerTestHelper;
    // An empty config tree has nothing to migrate.
    auto dir = H::createTempDir();

    MigrationManager mm(dir);
    ASSERT_FALSE(mm.needsMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(NeedsMigrationWithLegacyToken) {
    using H = MigrationManagerTestHelper;
    // A legacy write_token file alone should trigger migration.
    auto dir = H::createTempDir();
    H::createLegacyTokenFile(dir, "legacy-token-123");

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.needsMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(NeedsMigrationWithLegacyPackages) {
    using H = MigrationManagerTestHelper;
    // Legacy per-tool JSON files in the config root should trigger migration.
    auto dir = H::createTempDir();
    H::createLegacyPackageFile(dir, "test-tool",
                               H::createLegacyPackageJson("test-tool", "1.0", "hash123", "x86_64"));

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.needsMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(NeedsMigrationWithNewFormat) {
    using H = MigrationManagerTestHelper;
    // A config tree already in the new layout needs no migration.
    auto dir = H::createTempDir();
    H::createNewFormatConfig(dir);
    H::createPackagesDirectory(dir);

    MigrationManager mm(dir);
    ASSERT_FALSE(mm.needsMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(MigrateServerConfigurationSuccess) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();
    H::createLegacyTokenFile(dir, "legacy-token-456");

    MigrationManager mm(dir);
    // Converting the legacy token must succeed and produce servers.json.
    ASSERT_TRUE(mm.migrateServerConfiguration());
    ASSERT_TRUE(H::fileExists(dir / "servers.json"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(MigratePackageMetadataSuccess) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    // Two legacy per-tool JSON files sitting directly in the config dir.
    H::createLegacyPackageFile(dir, "tool1",
                               H::createLegacyPackageJson("tool1", "1.0", "hash1", "x86_64"));
    H::createLegacyPackageFile(dir, "tool2",
                               H::createLegacyPackageJson("tool2", "2.0", "hash2", "aarch64"));

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.migratePackageMetadata());

    // Records must land in the new packages/ subdirectory.
    ASSERT_TRUE(H::fileExists(dir / "packages"));
    ASSERT_TRUE(H::fileExists(dir / "packages" / "tool1.json"));
    ASSERT_TRUE(H::fileExists(dir / "packages" / "tool2.json"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(CreatePackagesDirectorySuccess) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    MigrationManager mm(dir);
    // The packages/ subdirectory must be created on demand.
    ASSERT_TRUE(mm.createPackagesDirectory());
    ASSERT_TRUE(H::fileExists(dir / "packages"));
    ASSERT_TRUE(std::filesystem::is_directory(dir / "packages"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(PerformFullMigrationSuccess) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    // Legacy layout: a write token plus two installed-tool records.
    H::createLegacyTokenFile(dir, "full-migration-token");
    H::createLegacyPackageFile(dir, "migrate-tool1",
                               H::createLegacyPackageJson("migrate-tool1", "1.0", "hash1", "x86_64"));
    H::createLegacyPackageFile(dir, "migrate-tool2",
                               H::createLegacyPackageJson("migrate-tool2", "2.0", "hash2", "aarch64"));

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.needsMigration());
    ASSERT_TRUE(mm.performMigration());

    // The result report accounts for both packages and both steps.
    auto report = mm.getLastMigrationResult();
    ASSERT_TRUE(report.success);
    ASSERT_EQ(2, report.migratedPackages);
    ASSERT_EQ(2, report.totalPackages);
    ASSERT_TRUE(report.serverConfigMigrated);
    ASSERT_TRUE(report.packageDirectoryCreated);

    // New-format artefacts exist on disk ...
    ASSERT_TRUE(H::fileExists(dir / "servers.json"));
    ASSERT_TRUE(H::fileExists(dir / "packages"));
    ASSERT_TRUE(H::fileExists(dir / "packages" / "migrate-tool1.json"));
    ASSERT_TRUE(H::fileExists(dir / "packages" / "migrate-tool2.json"));

    // ... and a second migration pass is no longer required.
    ASSERT_FALSE(mm.needsMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(PerformMigrationNoLegacyData) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    MigrationManager mm(dir);
    ASSERT_FALSE(mm.needsMigration());

    // Migrating an empty tree is a successful no-op.
    ASSERT_TRUE(mm.performMigration());

    auto report = mm.getLastMigrationResult();
    ASSERT_TRUE(report.success);
    ASSERT_EQ(0, report.migratedPackages);
    ASSERT_EQ(0, report.totalPackages);

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(ValidateMigrationSuccess) {
    using H = MigrationManagerTestHelper;
    // A complete new-format tree (servers.json + packages/) validates cleanly.
    auto dir = H::createTempDir();
    H::createNewFormatConfig(dir);
    H::createPackagesDirectory(dir);

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.validateMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(ValidateMigrationFailure) {
    using H = MigrationManagerTestHelper;
    // servers.json without the packages/ directory is an incomplete
    // migration and must fail validation.
    auto dir = H::createTempDir();
    H::createNewFormatConfig(dir);

    MigrationManager mm(dir);
    ASSERT_FALSE(mm.validateMigration());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(CreateBackupSuccess) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    // Seed some legacy state worth backing up.
    H::createLegacyTokenFile(dir, "backup-token");
    H::createLegacyPackageFile(dir, "backup-tool",
                               H::createLegacyPackageJson("backup-tool", "1.0", "hash", "x86_64"));

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.createBackup());

    // The backup lands in a dedicated subdirectory.
    ASSERT_TRUE(H::fileExists(dir / "migration_backup"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(RollbackCapability) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();
    H::createLegacyTokenFile(dir, "rollback-token");
    H::createLegacyPackageFile(dir, "rollback-tool",
                               H::createLegacyPackageJson("rollback-tool", "1.0", "hash", "x86_64"));

    MigrationManager mm(dir);

    // Rollback is only possible once a backup exists ...
    ASSERT_FALSE(mm.canRollback());
    mm.createBackup();
    ASSERT_TRUE(mm.canRollback());

    // ... and remains possible after the migration itself has run.
    mm.performMigration();
    ASSERT_TRUE(mm.canRollback());

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(PerformRollbackSuccess) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();
    H::createLegacyTokenFile(dir, "rollback-test-token");
    H::createLegacyPackageFile(dir, "rollback-test-tool",
                               H::createLegacyPackageJson("rollback-test-tool", "1.0", "hash", "x86_64"));

    MigrationManager mm(dir);
    mm.createBackup();
    mm.performMigration();

    // Sanity: the migration produced the new layout.
    ASSERT_TRUE(H::fileExists(dir / "servers.json"));
    ASSERT_TRUE(H::fileExists(dir / "packages"));

    // Rolling back must restore the pre-migration files.
    ASSERT_TRUE(mm.performRollback());
    ASSERT_TRUE(H::fileExists(dir / "getpkg.xyz" / "write_token.txt"));
    ASSERT_TRUE(H::fileExists(dir / "rollback-test-tool.json"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(MigrationWithCorruptedLegacyData) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    // One unparseable legacy record ...
    {
        std::ofstream broken(dir / "corrupted-tool.json");
        broken << "{ invalid json content";
    }
    // ... alongside valid legacy data.
    H::createLegacyTokenFile(dir, "valid-token");
    H::createLegacyPackageFile(dir, "valid-tool",
                               H::createLegacyPackageJson("valid-tool", "1.0", "hash", "x86_64"));

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.needsMigration());

    // Corruption must not abort the migration as a whole.
    ASSERT_TRUE(mm.performMigration());

    auto report = mm.getLastMigrationResult();
    ASSERT_TRUE(report.success);
    ASSERT_GE(report.errors.size(), 0);   // corrupted data may be reported
    ASSERT_GE(report.warnings.size(), 0); // warnings are permitted

    // The valid record still made it across.
    ASSERT_TRUE(H::fileExists(dir / "packages" / "valid-tool.json"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(MigrationResultReporting) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();
    H::createLegacyTokenFile(dir, "result-test-token");

    // Three legacy packages for the report to count.
    for (int i = 1; i <= 3; ++i) {
        const std::string tool = "result-tool" + std::to_string(i);
        H::createLegacyPackageFile(dir, tool,
            H::createLegacyPackageJson(tool, "1.0", "hash" + std::to_string(i), "x86_64"));
    }

    MigrationManager mm(dir);
    ASSERT_TRUE(mm.performMigration());

    // The report reflects the exact counts and both completed steps.
    auto report = mm.getLastMigrationResult();
    ASSERT_TRUE(report.success);
    ASSERT_EQ(3, report.migratedPackages);
    ASSERT_EQ(3, report.totalPackages);
    ASSERT_TRUE(report.serverConfigMigrated);
    ASSERT_TRUE(report.packageDirectoryCreated);

    H::cleanupTempDir(dir);
}
|
||||
|
||||
TEST(MigrationWithExistingNewFormatData) {
    using H = MigrationManagerTestHelper;
    auto dir = H::createTempDir();

    // Mixed state: legacy artefacts and an already-initialised new layout.
    H::createLegacyTokenFile(dir, "mixed-token");
    H::createLegacyPackageFile(dir, "legacy-mixed",
                               H::createLegacyPackageJson("legacy-mixed", "1.0", "hash", "x86_64"));
    H::createNewFormatConfig(dir);
    H::createPackagesDirectory(dir);

    MigrationManager mm(dir);
    // Leftover legacy data still demands a migration pass.
    ASSERT_TRUE(mm.needsMigration());
    ASSERT_TRUE(mm.performMigration());

    // Existing new-format data survives and legacy data is folded in.
    ASSERT_TRUE(H::fileExists(dir / "servers.json"));
    ASSERT_TRUE(H::fileExists(dir / "packages"));
    ASSERT_TRUE(H::fileExists(dir / "packages" / "legacy-mixed.json"));

    H::cleanupTempDir(dir);
}
|
||||
|
||||
// Registration function
|
||||
void registerMigrationManagerTests() {
|
||||
register_MigrationManager_DefaultConstructor();
|
||||
register_MigrationManager_CustomConstructor();
|
||||
register_MigrationManager_NeedsMigrationNoLegacyData();
|
||||
register_MigrationManager_NeedsMigrationWithLegacyToken();
|
||||
register_MigrationManager_NeedsMigrationWithLegacyPackages();
|
||||
register_MigrationManager_NeedsMigrationWithNewFormat();
|
||||
register_MigrationManager_MigrateServerConfigurationSuccess();
|
||||
register_MigrationManager_MigratePackageMetadataSuccess();
|
||||
register_MigrationManager_CreatePackagesDirectorySuccess();
|
||||
register_MigrationManager_PerformFullMigrationSuccess();
|
||||
register_MigrationManager_PerformMigrationNoLegacyData();
|
||||
register_MigrationManager_ValidateMigrationSuccess();
|
||||
register_MigrationManager_ValidateMigrationFailure();
|
||||
register_MigrationManager_CreateBackupSuccess();
|
||||
register_MigrationManager_RollbackCapability();
|
||||
register_MigrationManager_PerformRollbackSuccess();
|
||||
register_MigrationManager_MigrationWithCorruptedLegacyData();
|
||||
register_MigrationManager_MigrationResultReporting();
|
||||
register_MigrationManager_MigrationWithExistingNewFormatData();
|
||||
}
|
545
getpkg/test/test_package_metadata.cpp
Normal file
545
getpkg/test/test_package_metadata.cpp
Normal file
@ -0,0 +1,545 @@
|
||||
#include "PackageMetadata.hpp"
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
// Test framework declarations from test_main.cpp
class TestRunner {
public:
    static TestRunner& instance();
    void addTest(const std::string& name, std::function<void()> test);
};

// Assertion helpers. Each macro body is wrapped in do { ... } while (0) so a
// macro invocation behaves as a single statement: it is safe inside an
// unbraced if/else (no dangling-else) and the trailing semicolon parses as
// expected. The condition is fully parenthesized to protect operator
// precedence in expressions like ASSERT_FALSE(a == b).
#define ASSERT_TRUE(condition) \
    do { \
        if (!(condition)) { \
            throw std::runtime_error("Assertion failed: " #condition); \
        } \
    } while (0)

#define ASSERT_FALSE(condition) \
    do { \
        if ((condition)) { \
            throw std::runtime_error("Assertion failed: " #condition " should be false"); \
        } \
    } while (0)

#define ASSERT_EQ(expected, actual) \
    do { \
        if ((expected) != (actual)) { \
            throw std::runtime_error("Assertion failed: expected != actual"); \
        } \
    } while (0)

#define ASSERT_STR_EQ(expected, actual) \
    do { \
        if ((expected) != (actual)) { \
            throw std::runtime_error("Assertion failed: expected '" + std::string(expected) + "' but got '" + std::string(actual) + "'"); \
        } \
    } while (0)

#define ASSERT_NOT_EMPTY(str) \
    do { \
        if ((str).empty()) { \
            throw std::runtime_error("Assertion failed: string should not be empty"); \
        } \
    } while (0)

// Declares test_PackageMetadata_<name> and a register_ function that adds
// it to the shared TestRunner under "PackageMetadata::<name>".
#define TEST(name) \
    void test_PackageMetadata_##name(); \
    void register_PackageMetadata_##name() { \
        TestRunner::instance().addTest("PackageMetadata::" #name, test_PackageMetadata_##name); \
    } \
    void test_PackageMetadata_##name()
|
||||
|
||||
// Test helper class for PackageMetadata testing
|
||||
class PackageMetadataTestHelper {
|
||||
public:
|
||||
static std::filesystem::path createTempDir() {
|
||||
auto tempDir = std::filesystem::temp_directory_path() / "getpkg_metadata_test" / std::to_string(std::time(nullptr));
|
||||
std::filesystem::create_directories(tempDir);
|
||||
return tempDir;
|
||||
}
|
||||
|
||||
static void cleanupTempDir(const std::filesystem::path& dir) {
|
||||
if (std::filesystem::exists(dir)) {
|
||||
std::filesystem::remove_all(dir);
|
||||
}
|
||||
}
|
||||
|
||||
static void createJsonFile(const std::filesystem::path& path, const nlohmann::json& content) {
|
||||
std::ofstream file(path);
|
||||
file << content.dump(2);
|
||||
file.close();
|
||||
}
|
||||
|
||||
static nlohmann::json readJsonFile(const std::filesystem::path& path) {
|
||||
std::ifstream file(path);
|
||||
nlohmann::json j;
|
||||
file >> j;
|
||||
return j;
|
||||
}
|
||||
|
||||
static PackageMetadata createValidMetadata() {
|
||||
return PackageMetadata("test-tool", "2024.0115.1430", "abc123hash456", "x86_64", "getpkg.xyz", "2024-01-15T14:30:00Z");
|
||||
}
|
||||
|
||||
static nlohmann::json createLegacyJson() {
|
||||
return nlohmann::json{
|
||||
{"name", "legacy-tool"},
|
||||
{"version", "2023.1201.1000"},
|
||||
{"hash", "legacy123hash456"},
|
||||
{"arch", "x86_64"}
|
||||
// Note: no sourceServer or installDate fields
|
||||
};
|
||||
}
|
||||
|
||||
static nlohmann::json createCorruptedJson() {
|
||||
return nlohmann::json{
|
||||
{"name", ""}, // Invalid empty name
|
||||
{"version", "invalid-version-format"},
|
||||
{"hash", "short"}, // Too short hash
|
||||
{"arch", "invalid-arch"},
|
||||
{"sourceServer", "not-a-url"},
|
||||
{"installDate", "invalid-date-format"}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
// Default construction must yield an all-empty record that fails isValid().
TEST(DefaultConstructor) {
    PackageMetadata metadata;

    ASSERT_TRUE(metadata.name.empty());
    ASSERT_TRUE(metadata.version.empty());
    ASSERT_TRUE(metadata.hash.empty());
    ASSERT_TRUE(metadata.arch.empty());
    ASSERT_TRUE(metadata.sourceServer.empty());
    ASSERT_TRUE(metadata.installDate.empty());

    ASSERT_FALSE(metadata.isValid());
}

// The six-argument constructor must store every field verbatim and the
// resulting record must validate.
TEST(ParameterizedConstructor) {
    PackageMetadata metadata("test-tool", "2024.0115.1430", "abc123hash456", "x86_64", "getpkg.xyz", "2024-01-15T14:30:00Z");

    ASSERT_STR_EQ("test-tool", metadata.name);
    ASSERT_STR_EQ("2024.0115.1430", metadata.version);
    ASSERT_STR_EQ("abc123hash456", metadata.hash);
    ASSERT_STR_EQ("x86_64", metadata.arch);
    ASSERT_STR_EQ("getpkg.xyz", metadata.sourceServer);
    ASSERT_STR_EQ("2024-01-15T14:30:00Z", metadata.installDate);

    ASSERT_TRUE(metadata.isValid());
}

// toJson() must emit one JSON key per field, matching the stored values.
TEST(JsonSerialization) {
    PackageMetadata original = PackageMetadataTestHelper::createValidMetadata();

    nlohmann::json j = original.toJson();

    ASSERT_STR_EQ("test-tool", j["name"]);
    ASSERT_STR_EQ("2024.0115.1430", j["version"]);
    ASSERT_STR_EQ("abc123hash456", j["hash"]);
    ASSERT_STR_EQ("x86_64", j["arch"]);
    ASSERT_STR_EQ("getpkg.xyz", j["sourceServer"]);
    ASSERT_STR_EQ("2024-01-15T14:30:00Z", j["installDate"]);
}

// fromJson() must round-trip a complete six-field document.
TEST(JsonDeserialization) {
    nlohmann::json j = {
        {"name", "deserialized-tool"},
        {"version", "2024.0116.0900"},
        {"hash", "def456hash789"},
        {"arch", "aarch64"},
        {"sourceServer", "packages.example.com"},
        {"installDate", "2024-01-16T09:00:00Z"}
    };

    PackageMetadata metadata = PackageMetadata::fromJson(j);

    ASSERT_STR_EQ("deserialized-tool", metadata.name);
    ASSERT_STR_EQ("2024.0116.0900", metadata.version);
    ASSERT_STR_EQ("def456hash789", metadata.hash);
    ASSERT_STR_EQ("aarch64", metadata.arch);
    ASSERT_STR_EQ("packages.example.com", metadata.sourceServer);
    ASSERT_STR_EQ("2024-01-16T09:00:00Z", metadata.installDate);

    ASSERT_TRUE(metadata.isValid());
}

// Migrating a legacy document (no sourceServer/installDate) must fill in
// the supplied server and synthesize an install date.
TEST(LegacyJsonMigration) {
    nlohmann::json legacyJson = PackageMetadataTestHelper::createLegacyJson();

    PackageMetadata metadata = PackageMetadata::fromLegacyJson(legacyJson, "getpkg.xyz");

    ASSERT_STR_EQ("legacy-tool", metadata.name);
    ASSERT_STR_EQ("2023.1201.1000", metadata.version);
    ASSERT_STR_EQ("legacy123hash456", metadata.hash);
    ASSERT_STR_EQ("x86_64", metadata.arch);
    ASSERT_STR_EQ("getpkg.xyz", metadata.sourceServer);
    ASSERT_NOT_EMPTY(metadata.installDate); // Should be auto-generated

    ASSERT_TRUE(metadata.isValid());
}

// The server passed to fromLegacyJson() must be honored, not hard-coded.
TEST(LegacyJsonMigrationWithCustomServer) {
    nlohmann::json legacyJson = PackageMetadataTestHelper::createLegacyJson();

    PackageMetadata metadata = PackageMetadata::fromLegacyJson(legacyJson, "custom.server.com");

    ASSERT_STR_EQ("custom.server.com", metadata.sourceServer);
    ASSERT_TRUE(metadata.isValid());
}

// A fully-populated record validates with no error message.
TEST(ValidationValid) {
    PackageMetadata metadata = PackageMetadataTestHelper::createValidMetadata();

    ASSERT_TRUE(metadata.isValid());
    ASSERT_TRUE(metadata.getValidationError().empty());
}

// Each of the following tests blanks one required field (installDate is
// omitted from the five-argument constructor) and expects validation to
// fail with a non-empty error message.

TEST(ValidationInvalidName) {
    PackageMetadata metadata("", "2024.0115.1430", "abc123hash456", "x86_64", "getpkg.xyz");

    ASSERT_FALSE(metadata.isValid());
    ASSERT_NOT_EMPTY(metadata.getValidationError());
}

TEST(ValidationInvalidVersion) {
    PackageMetadata metadata("test-tool", "", "abc123hash456", "x86_64", "getpkg.xyz");

    ASSERT_FALSE(metadata.isValid());
    ASSERT_NOT_EMPTY(metadata.getValidationError());
}

TEST(ValidationInvalidHash) {
    PackageMetadata metadata("test-tool", "2024.0115.1430", "", "x86_64", "getpkg.xyz");

    ASSERT_FALSE(metadata.isValid());
    ASSERT_NOT_EMPTY(metadata.getValidationError());
}

TEST(ValidationInvalidArch) {
    PackageMetadata metadata("test-tool", "2024.0115.1430", "abc123hash456", "", "getpkg.xyz");

    ASSERT_FALSE(metadata.isValid());
    ASSERT_NOT_EMPTY(metadata.getValidationError());
}

TEST(ValidationInvalidServer) {
    PackageMetadata metadata("test-tool", "2024.0115.1430", "abc123hash456", "x86_64", "");

    ASSERT_FALSE(metadata.isValid());
    ASSERT_NOT_EMPTY(metadata.getValidationError());
}

// saveToFile()/loadFromFile() must round-trip every field byte-for-byte.
TEST(FileOperationsSaveAndLoad) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();
    auto filePath = tempDir / "test-metadata.json";

    PackageMetadata original = PackageMetadataTestHelper::createValidMetadata();

    // Save to file
    bool saveResult = original.saveToFile(filePath);
    ASSERT_TRUE(saveResult);
    ASSERT_TRUE(std::filesystem::exists(filePath));

    // Load from file
    PackageMetadata loaded = PackageMetadata::loadFromFile(filePath);

    ASSERT_STR_EQ(original.name, loaded.name);
    ASSERT_STR_EQ(original.version, loaded.version);
    ASSERT_STR_EQ(original.hash, loaded.hash);
    ASSERT_STR_EQ(original.arch, loaded.arch);
    ASSERT_STR_EQ(original.sourceServer, loaded.sourceServer);
    ASSERT_STR_EQ(original.installDate, loaded.installDate);

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// File errors must be reported via return value / invalid record, not by
// throwing.  (The save path points at a directory that does not exist.)
TEST(FileOperationsInvalidPath) {
    PackageMetadata metadata = PackageMetadataTestHelper::createValidMetadata();

    // Try to save to invalid path
    bool saveResult = metadata.saveToFile("/invalid/path/metadata.json");
    ASSERT_FALSE(saveResult);

    // Try to load from non-existent file
    PackageMetadata loaded = PackageMetadata::loadFromFile("/nonexistent/file.json");
    ASSERT_FALSE(loaded.isValid());
}

// needsUpdate() compares hashes: equal hash means up to date; any other
// value (including empty) means an update is required.
TEST(NeedsUpdateComparison) {
    PackageMetadata metadata = PackageMetadataTestHelper::createValidMetadata();

    // Same hash - no update needed
    ASSERT_FALSE(metadata.needsUpdate("abc123hash456"));

    // Different hash - update needed
    ASSERT_TRUE(metadata.needsUpdate("different123hash456"));

    // Empty hash - update needed
    ASSERT_TRUE(metadata.needsUpdate(""));
}

// getCurrentTimestamp() must produce a non-empty ISO-8601-looking string
// (only the presence of the 'T' separator and 'Z' suffix is checked).
TEST(TimestampGeneration) {
    PackageMetadata metadata;

    std::string timestamp = metadata.getCurrentTimestamp();
    ASSERT_NOT_EMPTY(timestamp);

    // Should be in ISO format (basic check)
    ASSERT_TRUE(timestamp.find("T") != std::string::npos);
    ASSERT_TRUE(timestamp.find("Z") != std::string::npos);
}
|
||||
|
||||
// PackageMetadataManager Tests

// A default-constructed manager must resolve some packages directory.
TEST(ManagerDefaultConstructor) {
    PackageMetadataManager manager;

    // Should initialize with default config directory
    auto packagesDir = manager.getPackagesDirectory();
    ASSERT_FALSE(packagesDir.empty());
}

// When given an explicit config root, the packages directory must be
// <root>/packages.
TEST(ManagerCustomConstructor) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);

    auto packagesDir = manager.getPackagesDirectory();
    ASSERT_STR_EQ((tempDir / "packages").string(), packagesDir.string());

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// ensurePackagesDirectory() must create the directory and report success.
TEST(ManagerEnsurePackagesDirectory) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);

    bool result = manager.ensurePackagesDirectory();
    ASSERT_TRUE(result);

    auto packagesDir = manager.getPackagesDirectory();
    ASSERT_TRUE(std::filesystem::exists(packagesDir));
    ASSERT_TRUE(std::filesystem::is_directory(packagesDir));

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// Saved metadata must be discoverable via packageExists() and round-trip
// through loadPackageMetadata().
TEST(ManagerSaveAndLoadPackageMetadata) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);
    manager.ensurePackagesDirectory();

    PackageMetadata metadata = PackageMetadataTestHelper::createValidMetadata();

    // Save metadata
    bool saveResult = manager.savePackageMetadata(metadata);
    ASSERT_TRUE(saveResult);

    // Check if package exists
    ASSERT_TRUE(manager.packageExists("test-tool"));

    // Load metadata
    PackageMetadata loaded = manager.loadPackageMetadata("test-tool");
    ASSERT_TRUE(loaded.isValid());
    ASSERT_STR_EQ(metadata.name, loaded.name);
    ASSERT_STR_EQ(metadata.version, loaded.version);

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// removePackageMetadata() must delete the record and packageExists() must
// subsequently report false.
TEST(ManagerRemovePackageMetadata) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);
    manager.ensurePackagesDirectory();

    PackageMetadata metadata = PackageMetadataTestHelper::createValidMetadata();
    manager.savePackageMetadata(metadata);

    ASSERT_TRUE(manager.packageExists("test-tool"));

    bool removeResult = manager.removePackageMetadata("test-tool");
    ASSERT_TRUE(removeResult);

    ASSERT_FALSE(manager.packageExists("test-tool"));

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// listInstalledPackages() must return every saved tool name; ordering is
// deliberately not asserted.
TEST(ManagerListInstalledPackages) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);
    manager.ensurePackagesDirectory();

    // Save multiple packages
    PackageMetadata metadata1("tool1", "1.0", "hash1", "x86_64", "server1");
    PackageMetadata metadata2("tool2", "2.0", "hash2", "aarch64", "server2");

    manager.savePackageMetadata(metadata1);
    manager.savePackageMetadata(metadata2);

    auto packages = manager.listInstalledPackages();
    ASSERT_EQ(2, packages.size());

    // Should contain both tools (order may vary)
    bool foundTool1 = std::find(packages.begin(), packages.end(), "tool1") != packages.end();
    bool foundTool2 = std::find(packages.begin(), packages.end(), "tool2") != packages.end();

    ASSERT_TRUE(foundTool1);
    ASSERT_TRUE(foundTool2);

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// getAllPackageMetadata() must return full records, not just names, with
// field values intact.
TEST(ManagerGetAllPackageMetadata) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);
    manager.ensurePackagesDirectory();

    // Save multiple packages
    PackageMetadata metadata1("tool1", "1.0", "hash1", "x86_64", "server1");
    PackageMetadata metadata2("tool2", "2.0", "hash2", "aarch64", "server2");

    manager.savePackageMetadata(metadata1);
    manager.savePackageMetadata(metadata2);

    auto allMetadata = manager.getAllPackageMetadata();
    ASSERT_EQ(2, allMetadata.size());

    // Verify metadata content
    bool foundTool1 = false, foundTool2 = false;
    for (const auto& meta : allMetadata) {
        if (meta.name == "tool1") {
            foundTool1 = true;
            ASSERT_STR_EQ("1.0", meta.version);
        } else if (meta.name == "tool2") {
            foundTool2 = true;
            ASSERT_STR_EQ("2.0", meta.version);
        }
    }

    ASSERT_TRUE(foundTool1);
    ASSERT_TRUE(foundTool2);

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// Legacy per-tool JSON files in the config root must be found, migrated
// into the packages/ directory, and stamped with server + install date.
TEST(ManagerMigrationFromLegacyFormat) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    // Create legacy package files in root config directory
    auto legacyFile1 = tempDir / "legacy-tool1.json";
    auto legacyFile2 = tempDir / "legacy-tool2.json";

    PackageMetadataTestHelper::createJsonFile(legacyFile1, PackageMetadataTestHelper::createLegacyJson());
    PackageMetadataTestHelper::createJsonFile(legacyFile2, nlohmann::json{
        {"name", "legacy-tool2"},
        {"version", "2023.1202.1100"},
        {"hash", "legacy789hash012"},
        {"arch", "aarch64"}
    });

    PackageMetadataManager manager(tempDir);

    // Find legacy files
    auto legacyFiles = manager.findLegacyPackageFiles();
    ASSERT_EQ(2, legacyFiles.size());

    // Perform migration
    bool migrationResult = manager.migrateFromLegacyFormat();
    ASSERT_TRUE(migrationResult);

    // Verify packages directory was created
    ASSERT_TRUE(std::filesystem::exists(manager.getPackagesDirectory()));

    // Verify packages were migrated
    ASSERT_TRUE(manager.packageExists("legacy-tool1"));
    ASSERT_TRUE(manager.packageExists("legacy-tool2"));

    // Verify metadata has server information
    PackageMetadata migrated1 = manager.loadPackageMetadata("legacy-tool1");
    ASSERT_STR_EQ("getpkg.xyz", migrated1.sourceServer);
    ASSERT_NOT_EMPTY(migrated1.installDate);

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// validateAllPackageMetadata() must report failure when any stored record
// is invalid.
TEST(ManagerValidateAllPackageMetadata) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);
    manager.ensurePackagesDirectory();

    // Save valid and invalid metadata
    PackageMetadata validMetadata = PackageMetadataTestHelper::createValidMetadata();
    PackageMetadata invalidMetadata("", "", "", "", ""); // All empty - invalid

    manager.savePackageMetadata(validMetadata);

    // Manually create invalid metadata file
    auto invalidFile = manager.getPackagesDirectory() / "invalid-tool.json";
    PackageMetadataTestHelper::createJsonFile(invalidFile, PackageMetadataTestHelper::createCorruptedJson());

    bool validationResult = manager.validateAllPackageMetadata();
    ASSERT_FALSE(validationResult); // Should fail due to invalid metadata

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}

// cleanupInvalidMetadata() must delete only the invalid records, report
// how many were removed, and leave valid records untouched.
TEST(ManagerCleanupInvalidMetadata) {
    auto tempDir = PackageMetadataTestHelper::createTempDir();

    PackageMetadataManager manager(tempDir);
    manager.ensurePackagesDirectory();

    // Save valid metadata
    PackageMetadata validMetadata = PackageMetadataTestHelper::createValidMetadata();
    manager.savePackageMetadata(validMetadata);

    // Create invalid metadata file
    auto invalidFile = manager.getPackagesDirectory() / "invalid-tool.json";
    PackageMetadataTestHelper::createJsonFile(invalidFile, PackageMetadataTestHelper::createCorruptedJson());

    // Should have 2 files initially
    auto packagesBefore = manager.listInstalledPackages();
    ASSERT_EQ(2, packagesBefore.size());

    // Cleanup invalid metadata
    int cleanedCount = manager.cleanupInvalidMetadata();
    ASSERT_EQ(1, cleanedCount);

    // Should have 1 file after cleanup
    auto packagesAfter = manager.listInstalledPackages();
    ASSERT_EQ(1, packagesAfter.size());
    ASSERT_STR_EQ("test-tool", packagesAfter[0]);

    PackageMetadataTestHelper::cleanupTempDir(tempDir);
}
|
||||
|
||||
// Registration function: called once from test_main.cpp to register every
// PackageMetadata test with the global TestRunner.  Each register_* hook
// is generated by this file's TEST macro; keep the list in sync (and in
// declaration order) when adding or removing tests.
void registerPackageMetadataTests() {
    register_PackageMetadata_DefaultConstructor();
    register_PackageMetadata_ParameterizedConstructor();
    register_PackageMetadata_JsonSerialization();
    register_PackageMetadata_JsonDeserialization();
    register_PackageMetadata_LegacyJsonMigration();
    register_PackageMetadata_LegacyJsonMigrationWithCustomServer();
    register_PackageMetadata_ValidationValid();
    register_PackageMetadata_ValidationInvalidName();
    register_PackageMetadata_ValidationInvalidVersion();
    register_PackageMetadata_ValidationInvalidHash();
    register_PackageMetadata_ValidationInvalidArch();
    register_PackageMetadata_ValidationInvalidServer();
    register_PackageMetadata_FileOperationsSaveAndLoad();
    register_PackageMetadata_FileOperationsInvalidPath();
    register_PackageMetadata_NeedsUpdateComparison();
    register_PackageMetadata_TimestampGeneration();
    register_PackageMetadata_ManagerDefaultConstructor();
    register_PackageMetadata_ManagerCustomConstructor();
    register_PackageMetadata_ManagerEnsurePackagesDirectory();
    register_PackageMetadata_ManagerSaveAndLoadPackageMetadata();
    register_PackageMetadata_ManagerRemovePackageMetadata();
    register_PackageMetadata_ManagerListInstalledPackages();
    register_PackageMetadata_ManagerGetAllPackageMetadata();
    register_PackageMetadata_ManagerMigrationFromLegacyFormat();
    register_PackageMetadata_ManagerValidateAllPackageMetadata();
    register_PackageMetadata_ManagerCleanupInvalidMetadata();
}
|
306
getpkg/test/test_server_manager.cpp
Normal file
306
getpkg/test/test_server_manager.cpp
Normal file
@ -0,0 +1,306 @@
|
||||
#include "ServerManager.hpp"
#include "test_framework.hpp"

#include <cstdlib>
#include <ctime>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>
|
||||
|
||||
// Helpers shared by the ServerManager test cases: temp config directories,
// legacy-token fixtures, and deliberately corrupted config files.
class ServerManagerTestHelper {
public:
    // Creates and returns a unique scratch config directory for one run.
    static std::filesystem::path createTempConfigDir() {
        const auto base = std::filesystem::temp_directory_path() / "getpkg_test";
        auto scratch = base / std::to_string(std::time(nullptr));
        std::filesystem::create_directories(scratch);
        return scratch;
    }

    // Recursively deletes a scratch directory, tolerating its absence.
    static void cleanupTempDir(const std::filesystem::path& dir) {
        if (std::filesystem::exists(dir)) {
            std::filesystem::remove_all(dir);
        }
    }

    // Recreates the pre-multi-server layout:
    //   <configDir>/getpkg.xyz/write_token.txt
    static void createLegacyTokenFile(const std::filesystem::path& configDir, const std::string& token) {
        auto legacyDir = configDir / "getpkg.xyz";
        std::filesystem::create_directories(legacyDir);

        std::ofstream out(legacyDir / "write_token.txt");
        out << token;
        out.close();
    }

    // Writes syntactically invalid JSON so loadConfiguration() is forced
    // down its recovery path.
    static void createCorruptedConfigFile(const std::filesystem::path& configDir) {
        std::ofstream out(configDir / "servers.json");
        out << "{ invalid json content";
        out.close();
    }
};
|
||||
|
||||
// A fresh configuration must contain exactly the built-in getpkg.xyz
// server, which is also the default.
TEST(ServerManager, DefaultConfiguration) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();

    // Set environment variable to use temp directory so ServerManager
    // reads/writes its config under the scratch dir, not the real $HOME.
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();

    auto servers = manager.getServers();
    ASSERT_EQ(1, servers.size());
    ASSERT_STR_EQ("getpkg.xyz", servers[0]);
    ASSERT_STR_EQ("getpkg.xyz", manager.getDefaultServer());

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Adding a well-formed host must succeed and append it after the default.
TEST(ServerManager, AddValidServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();

    auto result = manager.addServer("packages.example.com");
    ASSERT_EQ(ServerManagerError::None, result);

    auto servers = manager.getServers();
    ASSERT_EQ(2, servers.size());
    ASSERT_STR_EQ("getpkg.xyz", servers[0]);
    ASSERT_STR_EQ("packages.example.com", servers[1]);

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Malformed, empty, or wrong-protocol URLs must be rejected with
// InvalidUrl and leave the server list untouched.
TEST(ServerManager, AddInvalidServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();

    // Test invalid URLs
    auto result1 = manager.addServer("not-a-url");
    ASSERT_EQ(ServerManagerError::InvalidUrl, result1);

    auto result2 = manager.addServer("");
    ASSERT_EQ(ServerManagerError::InvalidUrl, result2);

    auto result3 = manager.addServer("ftp://invalid-protocol.com");
    ASSERT_EQ(ServerManagerError::InvalidUrl, result3);

    // Should still have only default server
    auto servers = manager.getServers();
    ASSERT_EQ(1, servers.size());

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Re-adding an existing server must report ServerAlreadyExists and not
// duplicate the entry.
TEST(ServerManager, AddDuplicateServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();

    auto result1 = manager.addServer("packages.example.com");
    ASSERT_EQ(ServerManagerError::None, result1);

    auto result2 = manager.addServer("packages.example.com");
    ASSERT_EQ(ServerManagerError::ServerAlreadyExists, result2);

    auto servers = manager.getServers();
    ASSERT_EQ(2, servers.size()); // Should not add duplicate

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}
|
||||
|
||||
// NOTE(review): the tests below previously used the one-argument
// TEST(Name) form while the first four tests in this file use
// TEST(ServerManager, Name), and registerServerManagerTests() calls
// register_ServerManager_<Name>() for all of them.  The shared
// test_framework.hpp macro cannot know the "ServerManager" prefix from a
// single argument, so these are normalized to the two-argument form to
// match the rest of the file and the registration hooks.

// Removing a middle server must succeed and preserve the order of the
// remaining entries.
TEST(ServerManager, RemoveServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();
    manager.addServer("packages.example.com");
    manager.addServer("test.server.com");

    auto result = manager.removeServer("packages.example.com");
    ASSERT_EQ(ServerManagerError::None, result);

    auto servers = manager.getServers();
    ASSERT_EQ(2, servers.size());
    ASSERT_STR_EQ("getpkg.xyz", servers[0]);
    ASSERT_STR_EQ("test.server.com", servers[1]);

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Removing an unknown server must report ServerNotFound.
TEST(ServerManager, RemoveNonExistentServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();

    auto result = manager.removeServer("nonexistent.server.com");
    ASSERT_EQ(ServerManagerError::ServerNotFound, result);

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// The last remaining server must be protected from removal.
TEST(ServerManager, RemoveLastServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();

    auto result = manager.removeServer("getpkg.xyz");
    ASSERT_EQ(ServerManagerError::LastServerRemoval, result);

    auto servers = manager.getServers();
    ASSERT_EQ(1, servers.size()); // Should still have the server

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Write tokens must be settable, retrievable, and reflected in
// getServersWithTokens().
TEST(ServerManager, WriteTokenManagement) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();
    manager.addServer("packages.example.com");

    // Set write token
    auto result = manager.setWriteToken("packages.example.com", "test-token-123");
    ASSERT_EQ(ServerManagerError::None, result);

    // Verify token
    ASSERT_TRUE(manager.hasWriteToken("packages.example.com"));
    ASSERT_STR_EQ("test-token-123", manager.getWriteToken("packages.example.com"));

    // Test servers with tokens
    auto serversWithTokens = manager.getServersWithTokens();
    ASSERT_EQ(1, serversWithTokens.size());
    ASSERT_STR_EQ("packages.example.com", serversWithTokens[0]);

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// The default publish server is the first server (in list order) holding
// a write token; with no tokens it is empty.
TEST(ServerManager, DefaultPublishServer) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    ServerManager manager;
    manager.ensureDefaultConfiguration();
    manager.addServer("packages.example.com");
    manager.addServer("test.server.com");

    // No tokens initially
    ASSERT_STR_EQ("", manager.getDefaultPublishServer());

    // Add token to second server
    manager.setWriteToken("test.server.com", "token2");
    ASSERT_STR_EQ("test.server.com", manager.getDefaultPublishServer());

    // Add token to first server - should become default
    manager.setWriteToken("getpkg.xyz", "token1");
    ASSERT_STR_EQ("getpkg.xyz", manager.getDefaultPublishServer());

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Configuration saved by one manager instance must be fully visible to a
// freshly-constructed instance.
TEST(ServerManager, ConfigurationPersistence) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    {
        ServerManager manager1;
        manager1.ensureDefaultConfiguration();
        manager1.addServer("packages.example.com");
        manager1.setWriteToken("packages.example.com", "test-token");
        manager1.saveConfiguration();
    }

    {
        ServerManager manager2;
        auto result = manager2.loadConfiguration();
        ASSERT_EQ(ServerManagerError::None, result);

        auto servers = manager2.getServers();
        ASSERT_EQ(2, servers.size());
        ASSERT_STR_EQ("getpkg.xyz", servers[0]);
        ASSERT_STR_EQ("packages.example.com", servers[1]);

        ASSERT_TRUE(manager2.hasWriteToken("packages.example.com"));
        ASSERT_STR_EQ("test-token", manager2.getWriteToken("packages.example.com"));
    }

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// A corrupted servers.json must not be fatal: loadConfiguration() is
// expected to fall back to the default configuration.
TEST(ServerManager, CorruptedConfigurationRecovery) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    // Create corrupted config file
    ServerManagerTestHelper::createCorruptedConfigFile(tempDir / ".config" / "getpkg");

    ServerManager manager;
    auto result = manager.loadConfiguration();

    // Should recover by creating default configuration
    ASSERT_EQ(ServerManagerError::None, result);

    auto servers = manager.getServers();
    ASSERT_EQ(1, servers.size());
    ASSERT_STR_EQ("getpkg.xyz", servers[0]);

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// A legacy write_token.txt must be migrated into the new per-server token
// storage for getpkg.xyz.
TEST(ServerManager, LegacyMigration) {
    auto tempDir = ServerManagerTestHelper::createTempConfigDir();
    setenv("HOME", tempDir.c_str(), 1);

    auto configDir = tempDir / ".config" / "getpkg";
    std::filesystem::create_directories(configDir);

    // Create legacy token file
    ServerManagerTestHelper::createLegacyTokenFile(configDir, "legacy-token-123");

    ServerManager manager;
    bool migrated = manager.migrateFromLegacy();
    ASSERT_TRUE(migrated);

    // Verify migration
    ASSERT_TRUE(manager.hasWriteToken("getpkg.xyz"));
    ASSERT_STR_EQ("legacy-token-123", manager.getWriteToken("getpkg.xyz"));

    ServerManagerTestHelper::cleanupTempDir(tempDir);
}

// Every error code must map to a human-readable, non-empty message.
TEST(ServerManager, ErrorMessages) {
    ServerManager manager;

    ASSERT_NOT_EMPTY(manager.getErrorMessage(ServerManagerError::InvalidUrl));
    ASSERT_NOT_EMPTY(manager.getErrorMessage(ServerManagerError::ServerNotFound));
    ASSERT_NOT_EMPTY(manager.getErrorMessage(ServerManagerError::ServerAlreadyExists));
    ASSERT_NOT_EMPTY(manager.getErrorMessage(ServerManagerError::LastServerRemoval));
}
|
||||
|
||||
// Registration function: called once from the test main to register every
// ServerManager test with the shared runner.  Each register_ServerManager_*
// hook is generated by the TEST macro; keep this list in sync (and in
// declaration order) when adding or removing tests.
void registerServerManagerTests() {
    register_ServerManager_DefaultConfiguration();
    register_ServerManager_AddValidServer();
    register_ServerManager_AddInvalidServer();
    register_ServerManager_AddDuplicateServer();
    register_ServerManager_RemoveServer();
    register_ServerManager_RemoveNonExistentServer();
    register_ServerManager_RemoveLastServer();
    register_ServerManager_WriteTokenManagement();
    register_ServerManager_DefaultPublishServer();
    register_ServerManager_ConfigurationPersistence();
    register_ServerManager_CorruptedConfigurationRecovery();
    register_ServerManager_LegacyMigration();
    register_ServerManager_ErrorMessages();
}
|
@ -1 +0,0 @@
|
||||
#!/bin/bash\necho debug
|
@ -1 +0,0 @@
|
||||
#!/bin/bash\necho debug2
|
@ -1 +0,0 @@
|
||||
#!/bin/bash\necho display test
|
@ -1 +0,0 @@
|
||||
#!/bin/bash\necho multi arch
|
@ -1 +0,0 @@
|
||||
#!/bin/bash\necho robust test
|
@ -1 +0,0 @@
|
||||
test content
|
200
gp/gp
200
gp/gp
@ -49,27 +49,43 @@ EOF
|
||||
|
||||
# Function to generate commit message based on changes
|
||||
generate_commit_message() {
|
||||
local files_changed
|
||||
files_changed=$(git diff --cached --name-only)
|
||||
local files_count
|
||||
files_count=$(echo "$files_changed" | wc -l)
|
||||
|
||||
if [ -z "$files_changed" ]; then
|
||||
files_changed=$(git diff --name-only)
|
||||
files_count=$(echo "$files_changed" | wc -l)
|
||||
# First check if we have staged changes
|
||||
local has_staged_changes=false
|
||||
if ! git diff --cached --quiet; then
|
||||
has_staged_changes=true
|
||||
fi
|
||||
|
||||
# If add-all is enabled, also include untracked files
|
||||
if [ "$ADD_ALL" = true ] && [ -z "$files_changed" ]; then
|
||||
files_changed=$(git ls-files --others --exclude-standard)
|
||||
files_count=$(echo "$files_changed" | wc -l)
|
||||
# Determine which changes to analyze based on staging status and ADD_ALL setting
|
||||
local status_command=""
|
||||
if [ "$has_staged_changes" = true ]; then
|
||||
status_command="git diff --cached --name-status"
|
||||
else
|
||||
status_command="git diff --name-status"
|
||||
fi
|
||||
|
||||
if [ -z "$files_changed" ]; then
|
||||
# Get all changes (staged or unstaged depending on context)
|
||||
local all_changes
|
||||
all_changes=$($status_command)
|
||||
|
||||
# If no changes from diff, check for untracked files when add-all is enabled
|
||||
if [ -z "$all_changes" ] && [ "$ADD_ALL" = true ]; then
|
||||
local untracked_files
|
||||
untracked_files=$(git ls-files --others --exclude-standard)
|
||||
if [ -n "$untracked_files" ]; then
|
||||
# Convert untracked files to "A" (added) status format
|
||||
all_changes=$(echo "$untracked_files" | sed 's/^/A\t/')
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$all_changes" ]; then
|
||||
echo "No changes to commit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Count total files
|
||||
local files_count
|
||||
files_count=$(echo "$all_changes" | wc -l)
|
||||
|
||||
# Generate smart commit message based on file types and changes
|
||||
local has_source_files=false
|
||||
local has_config_files=false
|
||||
@ -77,7 +93,8 @@ generate_commit_message() {
|
||||
local has_tests=false
|
||||
local message=""
|
||||
|
||||
while IFS= read -r file; do
|
||||
# Extract just the filenames for type detection
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$file" ] && continue
|
||||
|
||||
case "$file" in
|
||||
@ -94,15 +111,18 @@ generate_commit_message() {
|
||||
has_tests=true
|
||||
;;
|
||||
esac
|
||||
done <<< "$files_changed"
|
||||
done <<< "$all_changes"
|
||||
|
||||
# Create descriptive commit message
|
||||
if [ "$files_count" -eq 1 ]; then
|
||||
local change_line
|
||||
change_line=$(echo "$all_changes" | head -1)
|
||||
local status
|
||||
local single_file
|
||||
single_file=$(echo "$files_changed" | head -1)
|
||||
local change_type
|
||||
change_type=$(git diff --cached --name-status -- "$single_file" 2>/dev/null || git diff --name-status -- "$single_file")
|
||||
case "${change_type:0:1}" in
|
||||
status=$(echo "$change_line" | cut -f1)
|
||||
single_file=$(echo "$change_line" | cut -f2)
|
||||
|
||||
case "${status:0:1}" in
|
||||
A) message="Add $single_file" ;;
|
||||
M) message="Update $single_file" ;;
|
||||
D) message="Remove $single_file" ;;
|
||||
@ -110,6 +130,58 @@ generate_commit_message() {
|
||||
*) message="Modify $single_file" ;;
|
||||
esac
|
||||
else
|
||||
# For multiple files, analyze the types of changes
|
||||
local added_count=0
|
||||
local modified_count=0
|
||||
local deleted_count=0
|
||||
local renamed_count=0
|
||||
|
||||
# Use the all_changes variable we already have
|
||||
|
||||
# Count different types of changes
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$status" ] && continue
|
||||
case "${status:0:1}" in
|
||||
A) ((added_count++)) ;;
|
||||
M) ((modified_count++)) ;;
|
||||
D) ((deleted_count++)) ;;
|
||||
R) ((renamed_count++)) ;;
|
||||
esac
|
||||
done <<< "$all_changes"
|
||||
|
||||
# Also count untracked files if add-all is enabled
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
local untracked_files
|
||||
untracked_files=$(git ls-files --others --exclude-standard)
|
||||
if [ -n "$untracked_files" ]; then
|
||||
local untracked_count
|
||||
untracked_count=$(echo "$untracked_files" | wc -l)
|
||||
((added_count += untracked_count))
|
||||
fi
|
||||
fi
|
||||
|
||||
# Generate message based on change types
|
||||
local change_parts=()
|
||||
[ $added_count -gt 0 ] && change_parts+=("add $added_count")
|
||||
[ $modified_count -gt 0 ] && change_parts+=("update $modified_count")
|
||||
[ $deleted_count -gt 0 ] && change_parts+=("remove $deleted_count")
|
||||
[ $renamed_count -gt 0 ] && change_parts+=("rename $renamed_count")
|
||||
|
||||
local change_desc=""
|
||||
if [ ${#change_parts[@]} -eq 1 ]; then
|
||||
change_desc="${change_parts[0]}"
|
||||
elif [ ${#change_parts[@]} -eq 2 ]; then
|
||||
change_desc="${change_parts[0]} and ${change_parts[1]}"
|
||||
else
|
||||
# Join all but last with commas, last with "and"
|
||||
local last_idx=$((${#change_parts[@]} - 1))
|
||||
for i in $(seq 0 $((last_idx - 1))); do
|
||||
[ $i -gt 0 ] && change_desc+=", "
|
||||
change_desc+="${change_parts[i]}"
|
||||
done
|
||||
change_desc+=" and ${change_parts[last_idx]}"
|
||||
fi
|
||||
|
||||
local prefix=""
|
||||
if $has_tests; then
|
||||
prefix="test: "
|
||||
@ -121,18 +193,32 @@ generate_commit_message() {
|
||||
prefix="feat: "
|
||||
fi
|
||||
|
||||
message="${prefix}Update $files_count files"
|
||||
# Capitalize first letter of change description
|
||||
change_desc="$(echo "${change_desc:0:1}" | tr '[:lower:]' '[:upper:]')${change_desc:1}"
|
||||
|
||||
message="${prefix}${change_desc} files"
|
||||
fi
|
||||
|
||||
echo "$message"
|
||||
}
|
||||
|
||||
# Function to check if we're in a git repository
|
||||
# Function to check if we're in a git repository and change to repo root
|
||||
check_git_repo() {
|
||||
if ! git rev-parse --git-dir >/dev/null 2>&1; then
|
||||
print_error "Not in a git repository"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Change to the git repository root to ensure we operate on the entire repo
|
||||
local git_root
|
||||
git_root=$(git rev-parse --show-toplevel)
|
||||
if [ "$PWD" != "$git_root" ]; then
|
||||
print_info "Changing to git repository root: $git_root"
|
||||
cd "$git_root" || {
|
||||
print_error "Failed to change to git repository root"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to check for uncommitted changes and unpushed commits
|
||||
@ -225,19 +311,77 @@ show_status_and_confirm() {
|
||||
|
||||
# Show staged changes
|
||||
if ! git diff --cached --quiet; then
|
||||
print_info "Staged changes:"
|
||||
git diff --cached --name-only -- | while IFS= read -r line; do echo " $line"; done
|
||||
local staged_modified=""
|
||||
local staged_deleted=""
|
||||
local staged_added=""
|
||||
|
||||
# Get staged file status and categorize
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$status" ] && continue
|
||||
case "${status:0:1}" in
|
||||
A) staged_added="${staged_added}${file}\n" ;;
|
||||
M) staged_modified="${staged_modified}${file}\n" ;;
|
||||
D) staged_deleted="${staged_deleted}${file}\n" ;;
|
||||
*) staged_modified="${staged_modified}${file}\n" ;; # Default to modified for other statuses
|
||||
esac
|
||||
done < <(git diff --cached --name-status)
|
||||
|
||||
# Show staged added files
|
||||
if [ -n "$staged_added" ]; then
|
||||
print_info "Staged new files:"
|
||||
echo -e "$staged_added" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
# Show staged modified files
|
||||
if [ -n "$staged_modified" ]; then
|
||||
print_info "Staged modified files:"
|
||||
echo -e "$staged_modified" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
# Show staged deleted files
|
||||
if [ -n "$staged_deleted" ]; then
|
||||
print_info "Staged deleted files:"
|
||||
echo -e "$staged_deleted" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
has_staged_changes=true
|
||||
fi
|
||||
|
||||
# Show unstaged changes
|
||||
if ! git diff --quiet; then
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
print_info "Modified files (will be added):"
|
||||
else
|
||||
print_info "Modified files (unstaged, will NOT be included):"
|
||||
local modified_files=""
|
||||
local deleted_files=""
|
||||
|
||||
# Get file status and categorize
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$status" ] && continue
|
||||
case "${status:0:1}" in
|
||||
M) modified_files="${modified_files}${file}\n" ;;
|
||||
D) deleted_files="${deleted_files}${file}\n" ;;
|
||||
*) modified_files="${modified_files}${file}\n" ;; # Default to modified for other statuses
|
||||
esac
|
||||
done < <(git diff --name-status)
|
||||
|
||||
# Show modified files
|
||||
if [ -n "$modified_files" ]; then
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
print_info "Modified files (will be added):"
|
||||
else
|
||||
print_info "Modified files (unstaged, will NOT be included):"
|
||||
fi
|
||||
echo -e "$modified_files" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
git diff --name-only -- | while IFS= read -r line; do echo " $line"; done
|
||||
|
||||
# Show deleted files
|
||||
if [ -n "$deleted_files" ]; then
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
print_info "Deleted files (will be removed):"
|
||||
else
|
||||
print_info "Deleted files (unstaged, will NOT be included):"
|
||||
fi
|
||||
echo -e "$deleted_files" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
has_unstaged_changes=true
|
||||
fi
|
||||
|
||||
|
Reference in New Issue
Block a user