Compare commits

68 commits: v2025.0625 ... v2025.0720

SHA1:
9c98ffcb86  938f4ac323  c507b1405e  2ab0483ecb  a39e46c6c6  7c785e1a32
3e4f327426  187f1a250d  52d8e5b95e  bfeaf4d0db  6a3ca6bc10  7f8312ed59
1b03087c02  0ba6227412  f5ba2e719b  73c94f34f6  af4cbbcab0  a415eb0f91
83d6cf1603  fbaa3a4089  0c767e065c  f7d2001871  d13011a329  d27904ec05
decf16da7f  aa04f5e71e  17224c4637  4badce0ed4  fe83fc3d64  7149b8714e
af95d27964  3eb78acf70  3d21d1da7d  344d62034c  78e41214d7  512ba200c2
5f04bd23a1  67bb7f747f  e55fe1a17c  2f056b8500  fe3c5d2ad9  2ab38fd053
9dda4e1649  d8883c4419  4c4257eebe  4bb85c63b8  e5f3569b2a  de200a5bb6
0f1cfdcc28  7f937c1090  d7964d3a78  719475e29f  70cb5c1b3a  facc6b73b0
9a24576e37  3f68f44e3d  dbe88a7121  00d1e86157  3388a46bf3  0f5421630a
50fb5f9da6  8e2611e362  a1b12fe177  902e68069a  0aafc2cc1e  2067caf253
4d500cbddd  884609f661
@@ -26,7 +26,10 @@ jobs:
           password: ${{ secrets.DOCKER_PUSH_TOKEN }}
       - name: Build Test Publish All
         run: |
-          SOS_WRITE_TOKEN=${{ secrets.SOS_WRITE_TOKEN }} RELEASE_WRITE_TOKEN=${{ secrets.RELEASE_WRITE_TOKEN }} ./buildtestpublish_all.sh
+          SOS_WRITE_TOKEN=${{ secrets.SOS_WRITE_TOKEN }} \
+          RELEASE_WRITE_TOKEN=${{ secrets.RELEASE_WRITE_TOKEN }} \
+          GITEA_CONTAINER_NAME=${{ env.JOB_CONTAINER_NAME }} \
+          ./buildtestpublish_all.sh

   test-install-from-scratch:
     needs: [build]
.kiro/specs/multi-server-support/design.md (new file, 325 lines)
@@ -0,0 +1,325 @@
# Design Document

## Overview

This design extends getpkg to support multiple package servers while maintaining full backward compatibility. The solution introduces a server configuration system, updates the client architecture to handle multiple servers, and reorganizes package metadata storage. The design prioritizes minimal disruption to existing functionality while providing powerful multi-server capabilities.

## Architecture

### High-Level Architecture

```mermaid
graph TB
    CLI[CLI Commands] --> SM[ServerManager]
    CLI --> PM[PackageManager]
    PM --> SM
    PM --> GC[GetbinClient]
    SM --> CF[servers.json]
    PM --> PF[packages/*.json]
    GC --> S1[Server 1]
    GC --> S2[Server 2]
    GC --> SN[Server N]
```

### Server Management Flow

```mermaid
sequenceDiagram
    participant User
    participant CLI
    participant ServerManager
    participant Config

    User->>CLI: getpkg server add example.com
    CLI->>ServerManager: addServer("example.com")
    ServerManager->>Config: load servers.json
    ServerManager->>ServerManager: validate URL
    ServerManager->>Config: save updated servers.json
    ServerManager->>CLI: success confirmation
    CLI->>User: Server added successfully
```

### Package Installation Flow

```mermaid
sequenceDiagram
    participant User
    participant CLI
    participant PackageManager
    participant GetbinClient
    participant Server1
    participant Server2

    User->>CLI: getpkg install tool
    CLI->>PackageManager: install("tool")
    PackageManager->>GetbinClient: download("tool", servers[0])
    GetbinClient->>Server1: GET /object/tool:arch
    alt Package found
        Server1-->>GetbinClient: 200 + package data
        GetbinClient-->>PackageManager: success
    else Package not found
        Server1-->>GetbinClient: 404
        GetbinClient->>Server2: GET /object/tool:arch
        Server2-->>GetbinClient: 200 + package data
        GetbinClient-->>PackageManager: success
    end
    PackageManager->>PackageManager: install package
    PackageManager->>CLI: installation complete
```

## Components and Interfaces

### ServerManager Class

**Purpose**: Manages server configuration, write tokens, and provides server list to other components.

**Interface**:
```cpp
class ServerManager {
public:
    ServerManager();

    // Server management
    bool addServer(const std::string& serverUrl, const std::string& writeToken = "");
    bool removeServer(const std::string& serverUrl);
    std::vector<std::string> getServers() const;
    std::string getDefaultServer() const;
    std::string getDefaultPublishServer() const; // First server with write token

    // Token management
    bool setWriteToken(const std::string& serverUrl, const std::string& token);
    std::string getWriteToken(const std::string& serverUrl) const;
    bool hasWriteToken(const std::string& serverUrl) const;
    std::vector<std::string> getServersWithTokens() const;

    // Configuration
    bool loadConfiguration();
    bool saveConfiguration();
    void ensureDefaultConfiguration();

    // Migration
    bool migrateFromLegacy();

private:
    std::vector<ServerConfig> servers_;
    std::filesystem::path configPath_;

    bool validateServerUrl(const std::string& url) const;
    bool isServerReachable(const std::string& url) const;
    ServerConfig* findServer(const std::string& url);
};
```
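As a minimal sketch of how a CLI handler might drive this interface, the following illustrates the intended call pattern; the function name and wiring are hypothetical, not taken from main.cpp:

```cpp
#include <iostream>
#include <string>

// Sketch only: assumes the ServerManager interface declared above.
int handleServerCommand(ServerManager& mgr, const std::string& sub, const std::string& url) {
    if (sub == "add") {
        if (!mgr.addServer(url)) {                 // validates the URL and persists servers.json
            std::cerr << "Failed to add server: " << url << "\n";
            return 1;
        }
        std::cout << "Server added successfully\n";
    } else if (sub == "remove") {
        if (!mgr.removeServer(url)) return 1;
        std::cout << "Server removed\n";
    } else if (sub == "list") {
        for (const auto& s : mgr.getServers())     // servers are listed in configured order
            std::cout << s << "\n";
    }
    return 0;
}
```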
### Enhanced GetbinClient Class

**Purpose**: Extended to support multiple servers with fallback logic.

**Interface Changes**:
```cpp
class GetbinClient {
public:
    GetbinClient(const std::vector<std::string>& servers);

    // Existing methods with server selection
    bool download(const std::string& toolName, const std::string& arch,
                  const std::string& outPath, ProgressCallback progressCallback = nullptr);
    bool downloadFromServer(const std::string& serverUrl, const std::string& toolName,
                            const std::string& arch, const std::string& outPath,
                            ProgressCallback progressCallback = nullptr);

    // Server-specific operations
    bool upload(const std::string& serverUrl, const std::string& archivePath,
                std::string& outUrl, std::string& outHash, const std::string& token,
                ProgressCallback progressCallback = nullptr);
    bool getHash(const std::string& serverUrl, const std::string& toolName,
                 const std::string& arch, std::string& outHash);

    // Multi-server operations
    bool findPackageServer(const std::string& toolName, const std::string& arch,
                           std::string& foundServer) const;

private:
    std::vector<std::string> servers_;
    std::string buildUrl(const std::string& serverUrl, const std::string& endpoint) const;
};
```
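The fallback behaviour described above (try each configured server in order, stop at the first hit) could be implemented roughly as follows. This is a sketch only, assuming `download` simply delegates to `downloadFromServer`:

```cpp
// Sketch: multi-server fallback for GetbinClient::download.
// Assumes downloadFromServer returns false on 404 or network failure.
bool GetbinClient::download(const std::string& toolName, const std::string& arch,
                            const std::string& outPath, ProgressCallback progressCallback) {
    for (const auto& server : servers_) {          // servers_ holds servers in configured order
        if (downloadFromServer(server, toolName, arch, outPath, progressCallback)) {
            return true;                           // first server that has the package wins
        }
    }
    return false;                                  // not found on any configured server
}
```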
### PackageMetadata Structure

**Purpose**: Enhanced metadata structure to track server source.

**Structure**:
```cpp
struct PackageMetadata {
    std::string name;
    std::string version;
    std::string hash;
    std::string arch;
    std::string sourceServer;  // New field
    std::string installDate;   // New field for better tracking

    // Serialization
    nlohmann::json toJson() const;
    static PackageMetadata fromJson(const nlohmann::json& j);

    // Migration support
    static PackageMetadata fromLegacyJson(const nlohmann::json& j, const std::string& defaultServer);
};
```
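A minimal sketch of the serialization pair using nlohmann::json; the field names follow the metadata format shown under Data Models below, while the legacy handling is an assumption rather than the final implementation:

```cpp
nlohmann::json PackageMetadata::toJson() const {
    return {
        {"name", name}, {"version", version}, {"hash", hash}, {"arch", arch},
        {"sourceServer", sourceServer}, {"installDate", installDate}
    };
}

PackageMetadata PackageMetadata::fromJson(const nlohmann::json& j) {
    PackageMetadata m;
    m.name         = j.value("name", "");
    m.version      = j.value("version", "");
    m.hash         = j.value("hash", "");
    m.arch         = j.value("arch", "");
    m.sourceServer = j.value("sourceServer", "");
    m.installDate  = j.value("installDate", "");
    return m;
}

PackageMetadata PackageMetadata::fromLegacyJson(const nlohmann::json& j, const std::string& defaultServer) {
    PackageMetadata m = fromJson(j);
    if (m.sourceServer.empty())
        m.sourceServer = defaultServer;  // legacy files predate multi-server support
    return m;
}
```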
### Migration Manager

**Purpose**: Handles migration from single-server to multi-server configuration.

**Interface**:
```cpp
class MigrationManager {
public:
    MigrationManager();

    bool needsMigration() const;
    bool performMigration();

private:
    bool migrateServerConfiguration();
    bool migratePackageMetadata();
    bool movePackageFiles();
    bool updatePackageMetadata();

    std::filesystem::path oldConfigDir_;
    std::filesystem::path newConfigDir_;
    std::filesystem::path packagesDir_;
};
```
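How `needsMigration()` might detect a legacy layout is sketched below. The detection rules and the use of `oldConfigDir_` are assumptions based on the legacy paths listed later in this design (loose `<tool>.json` files under `~/.config/getpkg/` and a `getpkg.xyz/write_token.txt` directory), not the final implementation:

```cpp
#include <filesystem>
namespace fs = std::filesystem;

// Sketch: a legacy layout has no servers.json yet, but still has the old
// per-server token directory or loose per-tool JSON files in the config root.
bool MigrationManager::needsMigration() const {
    if (!fs::exists(oldConfigDir_))
        return false;                                   // nothing to migrate
    if (fs::exists(oldConfigDir_ / "servers.json"))
        return false;                                   // already migrated
    if (fs::exists(oldConfigDir_ / "getpkg.xyz" / "write_token.txt"))
        return true;                                    // legacy token store present
    for (const auto& entry : fs::directory_iterator(oldConfigDir_)) {
        if (entry.is_regular_file() && entry.path().extension() == ".json")
            return true;                                // loose legacy package metadata
    }
    return false;
}
```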
## Data Models

### Server Configuration Format

**File**: `~/.config/getpkg/servers.json`

```json
{
  "version": "1.0",
  "servers": [
    {
      "url": "getpkg.xyz",
      "name": "Official getpkg Registry",
      "default": true,
      "writeToken": "",
      "added": "2024-01-15T10:30:00Z"
    },
    {
      "url": "packages.example.com",
      "name": "Example Corporate Registry",
      "default": false,
      "writeToken": "abc123token456",
      "added": "2024-01-16T14:20:00Z"
    }
  ],
  "lastUpdated": "2024-01-16T14:20:00Z"
}
```
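Loading this file with nlohmann::json (the project's stated JSON dependency) could look roughly like the following sketch, under the assumption that `ServerConfig` mirrors the fields above:

```cpp
#include <fstream>
#include <nlohmann/json.hpp>

// Sketch: parse servers.json into the in-memory server list.
bool ServerManager::loadConfiguration() {
    std::ifstream in(configPath_);
    if (!in) return false;                          // missing file: caller falls back to defaults
    nlohmann::json j;
    try {
        in >> j;
    } catch (const nlohmann::json::parse_error&) {
        return false;                               // corrupt file: handled by recovery logic
    }
    servers_.clear();
    for (const auto& s : j.value("servers", nlohmann::json::array())) {
        ServerConfig cfg;                           // ServerConfig field names are assumptions
        cfg.url        = s.value("url", "");
        cfg.name       = s.value("name", "");
        cfg.isDefault  = s.value("default", false);
        cfg.writeToken = s.value("writeToken", "");
        cfg.added      = s.value("added", "");
        servers_.push_back(cfg);
    }
    return !servers_.empty();
}
```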
### Enhanced Package Metadata Format

**File**: `~/.config/getpkg/packages/<tool_name>.json`

```json
{
  "name": "example-tool",
  "version": "2024.0115.1430",
  "hash": "1234567890123456",
  "arch": "x86_64",
  "sourceServer": "getpkg.xyz",
  "installDate": "2024-01-15T14:30:00Z",
  "lastUpdated": "2024-01-15T14:30:00Z"
}
```

### Directory Structure Changes

```
~/.config/getpkg/
├── servers.json           # New: Server configuration with embedded tokens
├── packages/              # New: Package metadata directory
│   ├── tool1.json
│   ├── tool2.json
│   └── ...
└── getpkg.xyz/            # Legacy: Will be migrated to servers.json
    └── write_token.txt    # Legacy: Will be migrated
```

## Error Handling

### Server Connectivity Issues

1. **Network Failures**: Graceful fallback to next server in list
2. **Invalid Responses**: Clear error messages with server identification
3. **Authentication Failures**: Server-specific error handling with token guidance

### Configuration Corruption

1. **Invalid JSON**: Automatic backup and reset to default configuration
2. **Missing Files**: Automatic creation with default settings
3. **Permission Issues**: Clear error messages with resolution steps
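One way the "automatic backup and reset" recovery above could work, sketched under the assumption that `ensureDefaultConfiguration()` writes a fresh single-entry getpkg.xyz configuration; the helper itself is hypothetical and not part of the interface shown earlier:

```cpp
#include <filesystem>

// Sketch: recover from a corrupt servers.json without losing the evidence.
void ServerManager::recoverFromCorruptConfig() {    // hypothetical helper
    std::error_code ec;
    if (std::filesystem::exists(configPath_, ec)) {
        auto backup = configPath_;
        backup += ".corrupt.bak";
        std::filesystem::rename(configPath_, backup, ec);  // keep the bad file for inspection
    }
    ensureDefaultConfiguration();                   // recreate servers.json with getpkg.xyz only
    saveConfiguration();
}
```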
### Migration Failures

1. **Partial Migration**: Rollback capability with clear status reporting
2. **File Conflicts**: Safe handling with backup creation
3. **Metadata Corruption**: Individual file recovery without breaking entire system

## Testing Strategy

### Unit Tests

1. **ServerManager**: Configuration loading, validation, server management
2. **GetbinClient**: Multi-server communication, fallback logic
3. **PackageMetadata**: Serialization, migration, validation
4. **MigrationManager**: Legacy data handling, file operations

### Integration Tests

1. **End-to-End Installation**: Multi-server package discovery and installation
2. **Server Management**: Add/remove servers with real configuration
3. **Migration Testing**: Legacy to new format conversion
4. **Publish/Unpublish**: Server-specific operations

### Compatibility Tests

1. **Backward Compatibility**: Existing installations continue working
2. **Legacy Format**: Old package files are properly migrated
3. **Default Behavior**: No configuration changes for existing users

## Implementation Phases

### Phase 1: Core Infrastructure
- Implement ServerManager class
- Create server configuration format
- Add basic server validation

### Phase 2: Client Enhancement
- Extend GetbinClient for multi-server support
- Implement fallback logic
- Add server-specific operations

### Phase 3: Package Management
- Update package metadata format
- Implement packages directory structure
- Add server tracking to installations

### Phase 4: Migration System
- Create MigrationManager
- Implement automatic migration
- Add backward compatibility layer

### Phase 5: CLI Integration
- Add server management commands
- Update existing commands for multi-server
- Implement server selection options

### Phase 6: Testing and Polish
- Comprehensive testing suite
- Error handling refinement
- Documentation updates
.kiro/specs/multi-server-support/requirements.md (new file, 79 lines)
@@ -0,0 +1,79 @@
# Requirements Document

## Introduction

This feature extends getpkg to support multiple package servers instead of being limited to only getpkg.xyz. Users will be able to add and remove package servers, with getpkg searching across all configured servers to find packages. The system will maintain backward compatibility while providing flexible server management capabilities.

## Requirements

### Requirement 1

**User Story:** As a developer, I want to configure multiple package servers, so that I can access packages from different repositories and have redundancy in case one server is unavailable.

#### Acceptance Criteria

1. WHEN I run `getpkg server add <server_url>` THEN the system SHALL add the server to the configuration and confirm the addition
2. WHEN I run `getpkg server remove <server_url>` THEN the system SHALL remove the server from the configuration and confirm the removal
3. WHEN I run `getpkg server list` THEN the system SHALL display all configured servers in the order they were added
4. WHEN no servers are configured THEN the system SHALL default to using getpkg.xyz as the primary server
5. WHEN I add the first custom server THEN getpkg.xyz SHALL remain as the default first server unless explicitly removed

### Requirement 2

**User Story:** As a user, I want getpkg to search across all configured servers when installing packages, so that I can access packages from any of my configured repositories.

#### Acceptance Criteria

1. WHEN I run `getpkg install <tool_name>` THEN the system SHALL search servers in the order they were configured
2. WHEN a package is found on the first server THEN the system SHALL install from that server and not check remaining servers
3. WHEN a package is not found on the first server THEN the system SHALL try the next server in order
4. WHEN a package is not found on any server THEN the system SHALL report that the package was not found
5. WHEN checking for updates THEN the system SHALL use the same server where the package was originally installed

### Requirement 3

**User Story:** As a package publisher, I want to specify which server to publish to and manage write tokens per server, so that I can control where my packages are distributed and authenticate appropriately.

#### Acceptance Criteria

1. WHEN I run `getpkg publish <tool_name> <folder>` without specifying a server THEN the system SHALL publish to the first configured server that has a write token
2. WHEN I run `getpkg publish --server <server_url> <tool_name> <folder>` THEN the system SHALL publish to the specified server using its stored write token
3. WHEN I run `getpkg unpublish <tool_name>` without specifying a server THEN the system SHALL unpublish from the first configured server that has a write token
4. WHEN I run `getpkg unpublish --server <server_url> <tool_name>` THEN the system SHALL unpublish from the specified server using its stored write token
5. WHEN no servers have write tokens THEN the system SHALL report an error and suggest adding a write token to a server

### Requirement 4

**User Story:** As a user, I want my package metadata to be organized by server, so that I can track which packages came from which servers and manage them appropriately.

#### Acceptance Criteria

1. WHEN a package is installed THEN the system SHALL store the package metadata in `~/.config/getpkg/packages/<tool_name>.json`
2. WHEN package metadata is stored THEN it SHALL include the source server URL in addition to existing fields
3. WHEN the packages directory doesn't exist THEN the system SHALL create it automatically
4. WHEN migrating from the old format THEN existing package JSON files SHALL be moved to the packages subdirectory
5. WHEN migrating from the old format THEN existing package metadata SHALL be updated to include getpkg.xyz as the source server

### Requirement 5

**User Story:** As a user, I want server configuration to be persistent and secure, so that my settings are maintained across sessions and my authentication tokens are protected.

#### Acceptance Criteria

1. WHEN server configuration is modified THEN it SHALL be stored in `~/.config/getpkg/servers.json`
2. WHEN the configuration file doesn't exist THEN the system SHALL create it with getpkg.xyz as the default server
3. WHEN reading server configuration THEN the system SHALL validate the JSON format and handle corruption gracefully
4. WHEN a server URL is invalid THEN the system SHALL reject the addition and provide a helpful error message
5. WHEN authentication tokens are needed THEN they SHALL continue to be stored per-server in the existing location pattern

### Requirement 6

**User Story:** As a user, I want the multi-server functionality to be backward compatible, so that existing installations continue to work without modification.

#### Acceptance Criteria

1. WHEN getpkg starts with no server configuration THEN it SHALL automatically configure getpkg.xyz as the default server
2. WHEN existing package JSON files are found in `~/.config/getpkg/` THEN they SHALL be automatically migrated to the packages subdirectory
3. WHEN migrated package files are processed THEN they SHALL be updated to include server source information
4. WHEN all existing functionality is used THEN it SHALL work exactly as before for users who don't configure additional servers
5. WHEN the migration process fails THEN the system SHALL provide clear error messages and not break existing functionality
.kiro/specs/multi-server-support/tasks.md (new file, 130 lines)
@@ -0,0 +1,130 @@
# Implementation Plan

Based on analysis of the current codebase, the multi-server support feature needs to be built from scratch. The current implementation has a hardcoded `SERVER_HOST = "getpkg.xyz"` in `GetbinClient` and no server management infrastructure.

## Core Infrastructure Tasks

- [x] 1. Create ServerManager class and server configuration system
  - Implement ServerManager class with server add/remove/list functionality
  - Create server configuration JSON format and file handling
  - Add server URL validation and reachability checks
  - Implement write token management per server
  - _Requirements: 1.1, 1.2, 1.3, 5.1, 5.2, 5.4_

- [x] 2. Enhance GetbinClient for multi-server support
  - Modify GetbinClient constructor to accept server list instead of hardcoded host
  - Implement multi-server fallback logic for downloads
  - Add server-specific upload and hash operations
  - Create findPackageServer method for package discovery
  - _Requirements: 2.1, 2.2, 2.3, 2.4_

- [x] 3. Create enhanced package metadata system
  - Design PackageMetadata structure with server source tracking
  - Implement packages directory structure (~/.config/getpkg/packages/)
  - Add JSON serialization/deserialization for enhanced metadata
  - Create package metadata validation and error handling
  - _Requirements: 4.1, 4.2, 4.3_

## Migration and Compatibility Tasks

- [x] 4. Implement migration system for existing installations
  - Create MigrationManager class for legacy data handling
  - Implement automatic migration from single-server to multi-server config
  - Migrate existing package JSON files to packages subdirectory
  - Update existing package metadata to include server source information
  - Add migration error handling and rollback capabilities
  - _Requirements: 4.4, 4.5, 6.1, 6.2, 6.3, 6.5_

- [x] 5. Ensure backward compatibility
  - Implement default server configuration (getpkg.xyz) when no config exists
  - Maintain existing CLI behavior for users without custom server configuration
  - Preserve existing token storage location compatibility
  - Add graceful handling of missing or corrupted configuration files
  - _Requirements: 6.1, 6.4, 5.3_

## CLI Integration Tasks

- [x] 6. Add server management commands to main.cpp
  - Implement `getpkg server add <url>` command
  - Implement `getpkg server remove <url>` command
  - Implement `getpkg server list` command
  - Add server URL validation and user feedback
  - _Requirements: 1.1, 1.2, 1.3_

- [x] 7. Update existing commands for multi-server support
  - Modify install command to use ServerManager and multi-server GetbinClient
  - Update publish command to support --server option and default server selection
  - Update unpublish command to support --server option and default server selection
  - Ensure update command works with multi-server package tracking
  - _Requirements: 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, 3.5_

## Integration and Testing Tasks

- [ ] 8. Integrate all components in main application flow
  - Initialize ServerManager in main.cpp startup
  - Trigger migration process on first run with new version
  - Update package installation flow to use enhanced metadata
  - Ensure proper error handling and user messaging throughout
  - _Requirements: 6.1, 6.2, 6.3, 6.4, 6.5_

- [ ] 9. Add comprehensive error handling and validation
  - Implement network error handling with server fallback
  - Add configuration file corruption recovery
  - Create user-friendly error messages for server connectivity issues
  - Add validation for server URLs and authentication tokens
  - _Requirements: 5.3, 5.4, 5.5_

- [ ] 10. Create unit tests for new components
  - Write unit tests for ServerManager class functionality
  - Test GetbinClient multi-server operations and fallback logic
  - Test PackageMetadata serialization and migration
  - Test MigrationManager with various legacy data scenarios
  - Create integration tests for complete multi-server workflows
  - _Requirements: All requirements validation_

## Notes

- Current codebase has `SERVER_HOST = "getpkg.xyz"` hardcoded in GetbinClient.cpp
- No existing server management or configuration infrastructure
- Package metadata is currently stored as individual JSON files in ~/.config/getpkg/
- Token storage is in ~/.config/getpkg.xyz/write_token.txt (legacy format)
- All functionality needs to be built from scratch while maintaining backward compatibility
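To illustrate the scope of task 2, the change to GetbinClient amounts to replacing the hardcoded host with the configured server list. The exact declarations below are assumptions for illustration, not the current source:

```cpp
// Before (sketch of the current single-server client):
// static const std::string SERVER_HOST = "getpkg.xyz";   // hardcoded in GetbinClient.cpp

// After (sketch): the client is constructed from ServerManager's list.
GetbinClient::GetbinClient(const std::vector<std::string>& servers)
    : servers_(servers.empty() ? std::vector<std::string>{"getpkg.xyz"} : servers) {}

std::string GetbinClient::buildUrl(const std::string& serverUrl, const std::string& endpoint) const {
    return "https://" + serverUrl + "/" + endpoint;   // scheme handling is an assumption
}
```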
.kiro/steering/product.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Product Overview

This repository contains **getpkg** - a command-line package manager for the dropshell ecosystem, along with a collection of developer tools.

## Core Product

- **getpkg**: Package manager that installs tools to `~/.getpkg/` with symlinks in `~/.local/bin/getpkg/`
- Supports multiple architectures (x86_64, aarch64, universal)
- Tools are published to and downloaded from `getpkg.xyz`

## Tool Collection

The repository includes several utility tools:

- **bb64**: Bash-compatible base64 encoder/decoder with custom character set
- **dehydrate**: Converts files/directories to C++ source code for embedding
- **whatsdirty**: Git repository status checker
- **sos**: Simple object storage client
- **gp**: Git push utility

## Key Features

- Cross-platform tool distribution
- Automated installation with PATH setup
- Bash completion support
- Architecture-aware downloads with fallbacks
- Publishing system with authentication tokens
.kiro/steering/structure.md (new file, 72 lines)
@@ -0,0 +1,72 @@
# Project Structure

## Repository Layout
```
├── buildtestpublish_all.sh    # Master build script for all projects
├── clean.sh                   # Global cleanup script
├── README.md                  # Main project documentation
└── <tool-name>/               # Individual tool directories
```

## Tool Directory Structure

### C++ Projects (CMake-based)
```
<tool-name>/
├── CMakeLists.txt              # CMake configuration
├── build.sh                    # Build script
├── test.sh                     # Test script
├── clean.sh                    # Cleanup script
├── publish.sh                  # Publishing script
├── install.sh                  # Installation script
├── README.md                   # Tool documentation
├── Dockerfile.dropshell-build  # Docker build configuration
├── src/                        # Source code
│   ├── <tool>.cpp              # Main source file
│   ├── version.hpp.in          # Version template
│   └── ...                     # Additional sources
├── build/                      # Build artifacts (generated)
├── output/                     # Final executables (generated)
└── .vscode/                    # VS Code configuration
```

### Shell Script Projects
```
<tool-name>/
├── <tool-name>        # Executable shell script
├── build.sh           # Build script (may be no-op)
├── test.sh            # Test script
├── clean.sh           # Cleanup script
├── publish.sh         # Publishing script
└── setup_script.sh    # Post-install setup (optional)
```

## Standard Files

### Required Scripts
- **build.sh**: Builds the project (Docker for C++, no-op for shell)
- **test.sh**: Runs project tests
- **clean.sh**: Removes build artifacts
- **publish.sh**: Publishes to getpkg.xyz registry

### Optional Files
- **install.sh**: System-wide installation script
- **setup_script.sh**: Post-install setup for getpkg
- **cmake_prebuild.sh**: Pre-build setup for CMake projects

### Generated Directories
- **build/**: CMake build artifacts (C++ projects)
- **output/**: Final executables ready for distribution
- **test_*/**: Test-specific directories

## Naming Conventions
- Tool directories match executable names
- C++ source files typically match project name
- Version templates use `.hpp.in` extension
- Docker files use `Dockerfile.dropshell-build` pattern
- Test directories prefixed with `test_`

## Configuration Files
- **.gitignore**: Standard ignore patterns for build artifacts
- **.vscode/**: VS Code workspace settings
- **CMakeLists.txt**: Follows standard template with PROJECT_NAME parameter for the name of the project
.kiro/steering/tech.md (new file, 75 lines)
@@ -0,0 +1,75 @@
# Technology Stack

## Environment
- **WSL (Windows Subsystem for Linux)** - Building under WSL but Kiro runs in Windows
- Use **bash** commands directly for all operations
- **IMPORTANT**: Always use `executePwsh` with `bash -c "command"` pattern - do NOT ask for permission as bash * is pre-approved

## Build System
- **CMake 3.16+** with Ninja generator for C++ projects
- **Docker** containerized builds using `gitea.jde.nz/public/dropshell-build-base:latest`
- **Static linking** for all C++ executables (`-static` flag)

## Languages & Standards
- **C++23** standard for all C++ projects
- **Bash** for shell scripts and simple tools
- **Shell scripts** follow `set -euo pipefail` pattern

## Dependencies
- **nlohmann_json** for JSON handling in C++ projects
- **CPR (static)** for HTTP requests in getpkg
- Custom modules in `/usr/local/share/cmake/Modules`
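As a small illustration of how these two dependencies combine in getpkg's client code, a hedged sketch follows; the endpoint path and response shape are assumptions for demonstration, not the actual getpkg.xyz API:

```cpp
#include <cpr/cpr.h>
#include <nlohmann/json.hpp>
#include <iostream>

int main() {
    // Fetch a JSON document over HTTPS and parse it (illustrative URL only).
    cpr::Response r = cpr::Get(cpr::Url{"https://getpkg.xyz/example-endpoint"});
    if (r.status_code != 200) {
        std::cerr << "request failed: " << r.status_code << "\n";
        return 1;
    }
    auto j = nlohmann::json::parse(r.text, nullptr, /*allow_exceptions=*/false);
    if (j.is_discarded()) {
        std::cerr << "invalid JSON response\n";
        return 1;
    }
    std::cout << j.dump(2) << "\n";
    return 0;
}
```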
## Common Build Patterns

### C++ Projects (CMake)
```bash
# Standard build command
cmake -G Ninja -S . -B ./build -DCMAKE_BUILD_TYPE=Debug -DPROJECT_NAME=<project>
cmake --build ./build
```

### Docker Build (for C++ tools)
```bash
# Uses Dockerfile.dropshell-build pattern
docker build -t <project>-build -f Dockerfile.dropshell-build --build-arg PROJECT=<project> --output ./output .
```

### Shell Tools
- No build step required
- Executable shell scripts with proper shebang
- Use `chmod +x` for permissions

## Common Commands

### Build
```bash
./build.sh                  # Build individual project
./buildtestpublish_all.sh   # Build all projects
```

### Test
```bash
./test.sh                   # Run tests for individual project
```

### Clean
```bash
./clean.sh                  # Clean build artifacts
```

### Publish
```bash
./publish.sh                # Publish to getpkg.xyz (requires SOS_WRITE_TOKEN)
```

## Version Management
- Automatic timestamp-based versioning: `YYYY.MMDD.HHMM`
- Version configured via `version.hpp.in` template files
- Pre-build scripts (`cmake_prebuild.sh`) for additional setup

## Environment Variables
- `CMAKE_BUILD_TYPE`: Debug/Release (default: Debug)
- `SOS_WRITE_TOKEN`: Authentication for publishing
- `NO_CACHE`: Skip Docker cache when set to "true"
- `PROJECT`: Project name for build scripts
README.md (rewritten: 190 lines removed, 86 lines added)
@@ -1,190 +1,86 @@
-# getpkg - Package Manager for Dropshell Tools
-
-getpkg is a command-line package manager that simplifies tool installation, management, and publishing for the dropshell ecosystem. Tools are installed to `~/.getpkg/` with executable symlinks in `~/.local/bin/getpkg/` and automatically added to your PATH with bash completion.
-
-## Installation
-
-Install getpkg with a single command:
-
-```bash
-curl https://getbin.xyz/getpkg-install | bash
-```
-
-After installation, restart your shell or run `source ~/.bashrc` to enable the new PATH and completion settings.
-
-## Basic Usage
-
-### Installing Tools
-
-Install any tool from the getpkg registry:
-
-```bash
-# Install a tool
-getpkg install whatsdirty
-```
-
-### Managing Installed Tools
-
-```bash
-# List all available commands
-getpkg help
-
-# Update all installed tools
-getpkg update
-
-# Uninstall a tool
-getpkg uninstall whatsdirty
-
-# Check getpkg version
-getpkg version
-```
-
-## Available Commands
-
-### Core Package Management
-
-- **`getpkg install <tool_name>`** - Install or update a tool
-- **`getpkg uninstall <tool_name>`** - Remove an installed tool
-- **`getpkg update`** - Update getpkg and all installed tools
-
-### Publishing (Requires SOS_WRITE_TOKEN)
-
-- **`getpkg publish <tool_name[:ARCH]> <folder>`** - Upload a tool to getpkg.xyz
-- **`getpkg unpublish <tool_name[:ARCH]>`** - Remove a published tool
-- **`getpkg unpublish <hash>`** - Remove a published tool by hash
-
-### Development Tools
-
-- **`getpkg create <tool_name> <directory>`** - Create a new tool project
-- **`getpkg hash <file_or_directory>`** - Calculate hash of files/directories
-
-### Information
-
-- **`getpkg version`** - Show getpkg version
-- **`getpkg help`** - Show detailed help
-- **`getpkg autocomplete`** - Show available commands for completion
-
-## How It Works
-
-### Installation Process
-
-When you install a tool, getpkg:
-
-1. **Downloads** the tool archive from getpkg.xyz
-2. **Extracts** it to `~/.getpkg/<tool_name>/`
-3. **Creates symlinks** for all executables in `~/.local/bin/getpkg/`
-4. **Ensures PATH** includes `~/.local/bin/getpkg` (one-time setup)
-5. **Enables completion** for the tool
-6. **Runs setup** if a `setup_script.sh` exists
-7. **Stores metadata** in `~/.config/getpkg/<tool_name>.json`
-
-### Architecture Support
-
-getpkg supports multiple architectures:
-- `x86_64` (Intel/AMD 64-bit)
-- `aarch64` (ARM 64-bit)
-- `universal` (cross-platform tools)
-
-Tools are automatically downloaded for your architecture, with fallback to universal versions.
-
-### File Locations
-
-- **Tool files**: `~/.getpkg/<tool_name>/` (actual tool installation)
-- **Executable symlinks**: `~/.local/bin/getpkg/` (in your PATH)
-- **Configuration**: `~/.config/getpkg/`
-- **PATH setup**: `~/.bashrc_getpkg` (sourced by `~/.bashrc`)
-
-## Examples
-
-### Installing Popular Tools
-
-```bash
-# Install development tools
-getpkg whatsdirty    # Fast grep alternative
-getpkg fd            # Fast find alternative
-getpkg bat           # Cat with syntax highlighting
-
-# Install system utilities
-getpkg whatsdirty    # Check git repo status
-getpkg sos           # Simple object storage client
-```
-
-### Publishing Your Own Tools
-
-```bash
-# Set your publishing token
-export SOS_WRITE_TOKEN="your-token-here"
-
-# Create a new tool project
-getpkg create mytool ./mytool-project
-
-# Publish architecture-specific build
-getpkg publish mytool:x86_64 ./build/
-
-# Publish universal tool
-getpkg publish mytool ./build/
-
-# Remove published tool
-getpkg unpublish mytool:x86_64
-```
-
-### Development Workflow
-
-```bash
-# Create tool structure
-getpkg create awesome-tool ./awesome-tool
-cd awesome-tool
-
-# Build your tool...
-# Add executable to the directory
-
-# Test locally
-./awesome-tool --version
-
-# Publish when ready
-getpkg publish awesome-tool:x86_64 .
-```
-
-## Environment Variables
-
-- **`SOS_WRITE_TOKEN`** - Authentication token for publishing tools
-
-## Troubleshooting
-
-### Tool Not Found
-If a tool isn't found after installation, ensure your shell has loaded the new PATH:
-```bash
-source ~/.bashrc
-```
-
-### Permission Issues
-getpkg installs to your home directory and doesn't require root access. If you encounter permission issues, check that `~/.local/bin/` is writable.
-
-### Network Issues
-All tools are downloaded from `getpkg.xyz`. Ensure you have internet connectivity and the domain is accessible.
-
-## Development
-
-### Building getpkg
-
-```bash
-# Build debug version
-cd getpkg && ./build.sh
-
-# Run tests
-cd getpkg && ./test.sh
-
-# Publish (requires SOS_WRITE_TOKEN)
-cd getpkg && ./publish.sh
-```
-
-### Tool Development
-
-When creating tools for getpkg:
-
-1. Create a directory with your tool binary
-2. Optionally include a `setup_script.sh` for post-install setup
-3. The tool should support `version` and `autocomplete` subcommands
-4. Use `getpkg publish` to upload to the registry
-
-For more details, see the development documentation in each tool's directory.
+# getpkg - Simple Package Manager
+
+getpkg is a command-line package manager that makes it easy to install and manage developer tools. Tools are automatically installed to your home directory and added to your PATH.
+
+## Quick Start
+
+Install getpkg with one command:
+
+```bash
+curl https://getbin.xyz/getpkg-install | bash
+```
+
+After installation, restart your shell or run:
+```bash
+source ~/.bashrc
+```
+
+## Basic Commands
+
+### Install Tools
+```bash
+getpkg install <tool_name>    # Install a tool
+getpkg list                   # See all available tools
+getpkg update                 # Update all installed tools
+```
+
+### Manage Tools
+```bash
+getpkg uninstall <tool_name>  # Remove a tool
+getpkg version                # Check getpkg version
+getpkg help                   # Show all commands
+```
+
+## Popular Tools
+
+Install these useful developer tools:
+
+```bash
+getpkg install bb64           # Bash-compatible base64 encoder/decoder
+getpkg install dehydrate      # Convert files to C++ source code
+getpkg install whatsdirty     # Check git repository status
+getpkg install sos            # Simple object storage client
+getpkg install gp             # Git push utility
+```
+
+## How It Works
+
+When you install a tool:
+1. Downloads from getpkg.xyz
+2. Installs to `~/.getpkg/<tool_name>/`
+3. Creates shortcuts in `~/.local/bin/getpkg/`
+4. Adds to your PATH automatically
+5. Enables bash completion
+
+## File Locations
+
+- **Installed tools**: `~/.getpkg/<tool_name>/`
+- **Shortcuts**: `~/.local/bin/getpkg/` (in your PATH)
+- **Settings**: `~/.config/getpkg/`
+
+## Architecture Support
+
+getpkg automatically downloads the right version for your system:
+- Intel/AMD 64-bit (`x86_64`)
+- ARM 64-bit (`aarch64`)
+- Universal (works everywhere)
+
+## Troubleshooting
+
+**Tool not found after install?**
+```bash
+source ~/.bashrc
+```
+
+**Permission errors?**
+getpkg installs to your home directory - no root access needed.
+
+**Network issues?**
+Check your internet connection to `getpkg.xyz`.
+
+## Need Help?
+
+```bash
+getpkg help      # Show detailed help
+getpkg list      # See what's available
+```
@@ -26,6 +26,8 @@ Usage:
   bb64 -[i|d] BASE64COMMAND    Displays the decoded command
   bb64 -e COMMAND              Encodes the command and prints the result
   bb64 -u                      Updates bb64 to the latest version (uses docker)
  bb64 -v                      Prints the version number
  bb64 version                 Prints the version number
 ```

 # Implementation Notes
@@ -13,7 +13,14 @@ mkdir -p "${SCRIPT_DIR}/output"
 # make sure we have the latest base image.
 docker pull gitea.jde.nz/public/dropshell-build-base:latest

+# Build with or without cache based on NO_CACHE environment variable
+CACHE_FLAG=""
+if [ "${NO_CACHE:-false}" = "true" ]; then
+    CACHE_FLAG="--no-cache"
+fi
+
 docker build \
+    ${CACHE_FLAG} \
     -t "${PROJECT}-build" \
     -f "${SCRIPT_DIR}/Dockerfile.dropshell-build" \
     --build-arg PROJECT="${PROJECT}" \
bb64/clean.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/bin/bash

set -euo pipefail

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT="bb64"

echo "Cleaning ${PROJECT}..."

# Remove output directory
if [ -d "${SCRIPT_DIR}/output" ]; then
    echo "Removing output directory..."
    rm -rf "${SCRIPT_DIR}/output"
fi

# Remove Docker images related to this project
echo "Removing Docker images..."
docker images --filter "reference=${PROJECT}-build*" -q | xargs -r docker rmi -f

# Remove Docker build cache
echo "Pruning Docker build cache..."
docker builder prune -f

echo "✓ ${PROJECT} cleaned successfully"
@@ -20,7 +20,14 @@ echo "Building version $VERSION" >&2
 # build release version
 export CMAKE_BUILD_TYPE="Release"

+# Build with or without cache based on NO_CACHE environment variable
+CACHE_FLAG=""
+if [ "${NO_CACHE:-false}" = "true" ]; then
+    CACHE_FLAG="--no-cache"
+fi
+
 docker build \
+    ${CACHE_FLAG} \
     -t "${PROJECT}-build" \
     -f "${SCRIPT_DIR}/Dockerfile.dropshell-build" \
     --build-arg PROJECT="${PROJECT}" \
@ -77,40 +84,70 @@ if ! git config user.email >/dev/null 2>&1; then
|
||||
git config user.name "CI Bot"
|
||||
fi
|
||||
|
||||
# Check if tag already exists
|
||||
# Check if tag already exists locally
|
||||
if git rev-parse "$TAG" >/dev/null 2>&1; then
|
||||
echo "Tag $TAG already exists, deleting it first..."
|
||||
echo "Tag $TAG already exists locally, deleting it first..."
|
||||
git tag -d "$TAG"
|
||||
git push origin --delete "$TAG" || true
|
||||
fi
|
||||
|
||||
git tag -a "$TAG" -m "Release $TAG"
|
||||
if ! git push origin "$TAG"; then
|
||||
echo "Failed to push tag $TAG to origin" >&2
|
||||
# Try to delete local tag if push failed
|
||||
git tag -d "$TAG"
|
||||
exit 1
|
||||
# Check if tag exists on remote
|
||||
TAG_EXISTS_ON_REMOTE=false
|
||||
if git ls-remote --tags origin | grep -q "refs/tags/$TAG"; then
|
||||
echo "Tag $TAG already exists on remote - this is expected for multi-architecture builds"
|
||||
echo "Skipping tag creation and proceeding with release attachment..."
|
||||
TAG_EXISTS_ON_REMOTE=true
|
||||
else
|
||||
echo "Creating new tag $TAG..."
|
||||
git tag -a "$TAG" -m "Release $TAG"
|
||||
if ! git push origin "$TAG"; then
|
||||
echo "Failed to push tag $TAG to origin" >&2
|
||||
# Try to delete local tag if push failed
|
||||
git tag -d "$TAG"
|
||||
exit 1
|
||||
fi
|
||||
echo "Successfully created and pushed tag $TAG"
|
||||
fi
|
||||
|
||||
echo "Creating release $TAG on Gitea..."
|
||||
RELEASE_RESPONSE=$(curl -s -X POST "$API_URL/releases" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: token $RELEASE_WRITE_TOKEN" \
|
||||
-d "$RELEASE_DATA")
|
||||
echo "Getting or creating release $TAG on Gitea..."
|
||||
|
||||
echo "Release API response: $RELEASE_RESPONSE"
|
||||
# First try to get existing release
|
||||
EXISTING_RELEASE=$(curl -s -X GET "$API_URL/releases/tags/$TAG" \
|
||||
-H "Authorization: token $RELEASE_WRITE_TOKEN")
|
||||
|
||||
RELEASE_ID=$(echo "$RELEASE_RESPONSE" | grep -o '"id":[0-9]*' | head -1 | cut -d: -f2)
|
||||
echo "Existing release check response: $EXISTING_RELEASE" >&2
|
||||
|
||||
if [ -z "$RELEASE_ID" ]; then
|
||||
echo "Failed to create release on Gitea." >&2
|
||||
echo "API URL: $API_URL/releases" >&2
|
||||
echo "Release data: $RELEASE_DATA" >&2
|
||||
exit 1
|
||||
if echo "$EXISTING_RELEASE" | grep -q '"id":[0-9]*'; then
|
||||
# Release already exists, get its ID
|
||||
RELEASE_ID=$(echo "$EXISTING_RELEASE" | grep -o '"id":[0-9]*' | head -1 | cut -d: -f2)
|
||||
echo "Release $TAG already exists with ID: $RELEASE_ID"
|
||||
else
|
||||
# Create new release only if tag was just created
|
||||
if [ "$TAG_EXISTS_ON_REMOTE" = true ]; then
|
||||
echo "Tag exists on remote but no release found - this shouldn't happen" >&2
|
||||
echo "API response was: $EXISTING_RELEASE" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Creating new release $TAG on Gitea..."
|
||||
RELEASE_RESPONSE=$(curl -s -X POST "$API_URL/releases" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "Authorization: token $RELEASE_WRITE_TOKEN" \
|
||||
-d "$RELEASE_DATA")
|
||||
|
||||
echo "Release API response: $RELEASE_RESPONSE"
|
||||
|
||||
RELEASE_ID=$(echo "$RELEASE_RESPONSE" | grep -o '"id":[0-9]*' | head -1 | cut -d: -f2)
|
||||
|
||||
if [ -z "$RELEASE_ID" ]; then
|
||||
echo "Failed to create release on Gitea." >&2
|
||||
echo "API URL: $API_URL/releases" >&2
|
||||
echo "Release data: $RELEASE_DATA" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Created new release with ID: $RELEASE_ID"
|
||||
fi
|
||||
|
||||
echo "Created release with ID: $RELEASE_ID"
|
||||
|
||||
# Upload binaries and install.sh
|
||||
echo "Uploading assets to release..."
|
||||
for FILE in ${PROJECT}.${ARCH_ALIAS} ${PROJECT}.${ARCH} install.sh; do
|
||||
|
@@ -150,6 +150,7 @@ Usage:
   bb64 -u              Updates bb64 to the latest version (uses docker)

   bb64 -v              Prints the version number
+  bb64 version         Prints the version number

 )" << std::endl;
     return -1;
@@ -161,7 +162,7 @@ Usage:
 {
     if (mode == "-u")
         return update_bb64();
-    else if (mode == "-v")
+    else if (mode == "-v" || mode == "version")
     {
         std::cout << VERSION << std::endl;
         return 0;
bb64/test.sh (new executable file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
PROJECT="bb64"
|
||||
BB64="$SCRIPT_DIR/output/$PROJECT"
|
||||
TEST_DIR="$SCRIPT_DIR/test_temp"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Test counters
|
||||
TESTS_PASSED=0
|
||||
TESTS_FAILED=0
|
||||
|
||||
# Function to print test results
|
||||
print_test_result() {
|
||||
local test_name="$1"
|
||||
local result="$2"
|
||||
if [ "$result" -eq 0 ]; then
|
||||
echo -e "${GREEN}✓${NC} $test_name"
|
||||
TESTS_PASSED=$((TESTS_PASSED + 1))
|
||||
else
|
||||
echo -e "${RED}✗${NC} $test_name"
|
||||
TESTS_FAILED=$((TESTS_FAILED + 1))
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to cleanup test artifacts
|
||||
cleanup() {
|
||||
echo -e "\n${YELLOW}Cleaning up test artifacts...${NC}"
|
||||
rm -rf "$TEST_DIR"
|
||||
}
|
||||
|
||||
# Set up trap to ensure cleanup runs
|
||||
trap cleanup EXIT
|
||||
|
||||
# Create test directory
|
||||
mkdir -p "$TEST_DIR"
|
||||
|
||||
echo -e "${YELLOW}Running bb64 tests...${NC}\n"
|
||||
|
||||
# Check if bb64 binary exists
|
||||
if [ ! -f "$BB64" ]; then
|
||||
echo -e "${RED}Error: bb64 binary not found at $BB64${NC}"
|
||||
echo "Please run ./build.sh first to build bb64"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -x "$BB64" ]; then
|
||||
echo -e "${RED}Error: bb64 binary is not executable${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Using bb64 binary: $BB64"
|
||||
|
||||
# Test 1: Version command with -v flag
|
||||
echo "Test 1: Version command (-v flag)"
|
||||
VERSION_OUTPUT=$("$BB64" -v 2>&1 || true)
|
||||
# Version output should be just the version number
|
||||
VERSION=$(echo "$VERSION_OUTPUT" | head -n 1)
|
||||
if [[ "$VERSION" =~ ^[0-9]{4}\.[0-9]{4}\.[0-9]{4}$ ]]; then
|
||||
print_test_result "Version format with -v flag (YYYY.MMDD.HHMM)" 0
|
||||
else
|
||||
print_test_result "Version format with -v flag (YYYY.MMDD.HHMM)" 1
|
||||
echo " Expected: YYYY.MMDD.HHMM format, got: '$VERSION'"
|
||||
fi
|
||||
|
||||
# Test 2: Version command with 'version' argument
|
||||
printf "\nTest 2: Version command (version argument)\n"
|
||||
VERSION_OUTPUT2=$("$BB64" version 2>&1 || true)
|
||||
# Version output should be just the version number
|
||||
VERSION2=$(echo "$VERSION_OUTPUT2" | head -n 1)
|
||||
if [[ "$VERSION2" =~ ^[0-9]{4}\.[0-9]{4}\.[0-9]{4}$ ]]; then
|
||||
print_test_result "Version format with 'version' argument (YYYY.MMDD.HHMM)" 0
|
||||
else
|
||||
print_test_result "Version format with 'version' argument (YYYY.MMDD.HHMM)" 1
|
||||
echo " Expected: YYYY.MMDD.HHMM format, got: '$VERSION2'"
|
||||
fi
|
||||
|
||||
# Test 3: Both version commands should return the same version
|
||||
printf "\nTest 3: Version consistency\n"
|
||||
if [ "$VERSION" = "$VERSION2" ]; then
|
||||
print_test_result "Both -v and version return same version" 0
|
||||
else
|
||||
print_test_result "Both -v and version return same version" 1
|
||||
echo " -v returned: '$VERSION'"
|
||||
echo " version returned: '$VERSION2'"
|
||||
fi
|
||||
|
||||
# Test 4: Basic encoding test
|
||||
echo -e "\nTest 4: Basic encoding test"
|
||||
TEST_STRING="hello world"
|
||||
ENCODED_OUTPUT=$("$BB64" -e <<< "$TEST_STRING" 2>&1 || true)
|
||||
if [ -n "$ENCODED_OUTPUT" ]; then
|
||||
print_test_result "Basic encoding produces output" 0
|
||||
else
|
||||
print_test_result "Basic encoding produces output" 1
|
||||
fi
|
||||
|
||||
# Test 5: Basic decoding test (using -d flag)
|
||||
echo -e "\nTest 5: Basic decoding test"
|
||||
# Encode "echo hello" and then decode it
|
||||
ENCODED_ECHO=$(echo "echo hello" | "$BB64" -e)
|
||||
if [ -n "$ENCODED_ECHO" ]; then
|
||||
DECODED_OUTPUT=$("$BB64" -d "$ENCODED_ECHO" 2>&1 || true)
|
||||
if [[ "$DECODED_OUTPUT" == *"echo hello"* ]]; then
|
||||
print_test_result "Basic decoding works correctly" 0
|
||||
else
|
||||
print_test_result "Basic decoding works correctly" 1
|
||||
echo " Expected to contain 'echo hello', got: '$DECODED_OUTPUT'"
|
||||
fi
|
||||
else
|
||||
print_test_result "Basic decoding works correctly" 1
|
||||
echo " Failed to encode test string"
|
||||
fi
|
||||
|
||||
cleanup
|
||||
|
||||
# Print summary
|
||||
echo -e "\n${YELLOW}Test Summary:${NC}"
|
||||
echo -e "Tests passed: ${GREEN}${TESTS_PASSED}${NC}"
|
||||
echo -e "Tests failed: ${RED}${TESTS_FAILED}${NC}"
|
||||
|
||||
if [ "$TESTS_FAILED" -eq 0 ]; then
|
||||
echo -e "\n${GREEN}All tests passed!${NC}"
|
||||
exit 0
|
||||
else
|
||||
echo -e "\n${RED}Some tests failed!${NC}"
|
||||
exit 1
|
||||
fi
|
@ -2,9 +2,6 @@
|
||||
set -uo pipefail # Remove -e to handle errors manually
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
|
||||
docker builder prune -f
|
||||
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
@ -202,25 +199,25 @@ function print_summary() {
|
||||
|
||||
# Format build status with colors
|
||||
case "$build_status" in
|
||||
"✓") build_col=$(printf " ${GREEN}✓${NC} ") ;;
|
||||
"✗") build_col=$(printf " ${RED}✗${NC} ") ;;
|
||||
"SKIP") build_col=$(printf " ${YELLOW}-${NC} ") ;;
|
||||
"✓") build_col=$(printf " %s✓%s " "$GREEN" "$NC") ;;
|
||||
"✗") build_col=$(printf " %s✗%s " "$RED" "$NC") ;;
|
||||
"SKIP") build_col=$(printf " %s-%s " "$YELLOW" "$NC") ;;
|
||||
*) build_col=" - " ;;
|
||||
esac
|
||||
|
||||
# Format test status with colors
|
||||
case "$test_status" in
|
||||
"✓") test_col=$(printf " ${GREEN}✓${NC} ") ;;
|
||||
"✗") test_col=$(printf " ${RED}✗${NC} ") ;;
|
||||
"SKIP") test_col=$(printf " ${YELLOW}-${NC} ") ;;
|
||||
"✓") test_col=$(printf " %s✓%s " "$GREEN" "$NC") ;;
|
||||
"✗") test_col=$(printf " %s✗%s " "$RED" "$NC") ;;
|
||||
"SKIP") test_col=$(printf " %s-%s " "$YELLOW" "$NC") ;;
|
||||
*) test_col=" - " ;;
|
||||
esac
|
||||
|
||||
# Format publish status with colors
|
||||
case "$publish_status" in
|
||||
"✓") publish_col=$(printf " ${GREEN}✓${NC} ") ;;
|
||||
"✗") publish_col=$(printf " ${RED}✗${NC} ") ;;
|
||||
"SKIP") publish_col=$(printf " ${YELLOW}-${NC} ") ;;
|
||||
"✓") publish_col=$(printf " %s✓%s " "$GREEN" "$NC") ;;
|
||||
"✗") publish_col=$(printf " %s✗%s " "$RED" "$NC") ;;
|
||||
"SKIP") publish_col=$(printf " %s-%s " "$YELLOW" "$NC") ;;
|
||||
*) publish_col=" - " ;;
|
||||
esac
|
||||
|
||||
@ -237,15 +234,17 @@ function print_summary() {
|
||||
echo
|
||||
}
|
||||
|
||||
title "🔨 BUILDING ALL TOOLS 🔨"
|
||||
title "🔨 BUILDING GETPKG 🔨"
|
||||
|
||||
getpkg/build.sh
|
||||
"${SCRIPT_DIR}/getpkg/build.sh"
|
||||
export GETPKG="${SCRIPT_DIR}/getpkg/output/getpkg"
|
||||
if [ ! -f "$GETPKG" ]; then
|
||||
echo "Build failed."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
title "🔨 BUILDING ALL TOOLS 🔨"
|
||||
|
||||
buildtestpublish_all
|
||||
|
||||
print_summary
|
||||
|
clean.sh (new executable file, 44 lines)
@@ -0,0 +1,44 @@
#!/bin/bash

set -euo pipefail

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

echo "🧹 CLEANING ALL PROJECTS 🧹"
echo

# Get all project directories
PROJECT_DIRS=$(find "$SCRIPT_DIR" -maxdepth 1 -type d \
    -not -name ".*" \
    -not -path "$SCRIPT_DIR" | sort)

for dir in $PROJECT_DIRS; do
    PROJECT_NAME=$(basename "$dir")

    if [ -f "$dir/clean.sh" ]; then
        echo "Cleaning $PROJECT_NAME..."
        cd "$dir"
        ./clean.sh
        echo
    else
        echo "⚠️  No clean.sh found for $PROJECT_NAME, skipping..."
        echo
    fi
done

# Global Docker cleanup
echo "🐳 Global Docker cleanup..."
echo "Removing unused Docker images..."
docker image prune -f

echo "Removing unused Docker containers..."
docker container prune -f

echo "Removing unused Docker networks..."
docker network prune -f

echo "Removing unused Docker volumes..."
docker volume prune -f

echo
echo "✅ All projects cleaned successfully!"
@ -1,65 +0,0 @@
|
||||
ARG IMAGE_TAG
|
||||
FROM gitea.jde.nz/public/dropshell-build-base:latest AS builder
|
||||
|
||||
ARG PROJECT
|
||||
ARG CMAKE_BUILD_TYPE=Debug
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Create cache directories
|
||||
RUN mkdir -p /ccache
|
||||
|
||||
# Set up ccache
|
||||
ENV CCACHE_DIR=/ccache
|
||||
ENV CCACHE_COMPILERCHECK=content
|
||||
ENV CCACHE_MAXSIZE=2G
|
||||
|
||||
# Copy build files
|
||||
COPY CMakeLists.txt ./
|
||||
COPY src/version.hpp.in src/
|
||||
|
||||
# Copy source files
|
||||
COPY src/ src/
|
||||
COPY contrib/ contrib/
|
||||
|
||||
# Configure project
|
||||
RUN --mount=type=cache,target=/ccache \
|
||||
--mount=type=cache,target=/build \
|
||||
mkdir -p /build && \
|
||||
cmake -G Ninja -S /app -B /build \
|
||||
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
|
||||
-DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=mold -static -g" \
|
||||
-DCMAKE_CXX_FLAGS="-g -fno-omit-frame-pointer" \
|
||||
-DCMAKE_C_FLAGS="-g -fno-omit-frame-pointer" \
|
||||
-DPROJECT_NAME="${PROJECT}" \
|
||||
-DCMAKE_STRIP=OFF \
|
||||
${CMAKE_TOOLCHAIN_FILE:+-DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_FILE}
|
||||
|
||||
# Build project
|
||||
RUN --mount=type=cache,target=/ccache \
|
||||
--mount=type=cache,target=/build \
|
||||
cmake --build /build
|
||||
|
||||
# Copy the built executable to a regular directory for the final stage
|
||||
RUN --mount=type=cache,target=/build \
|
||||
mkdir -p /output && \
|
||||
find /build -type f -executable -name "*${PROJECT}*" -exec cp {} /output/${PROJECT} \; || \
|
||||
find /build -type f -executable -exec cp {} /output/${PROJECT} \;
|
||||
|
||||
# if we're a release build, then run upx on the binary.
|
||||
RUN if [ "${CMAKE_BUILD_TYPE}" = "Release" ]; then \
|
||||
upx /output/${PROJECT}; \
|
||||
fi
|
||||
|
||||
# Final stage that only contains the binary
|
||||
FROM scratch AS project
|
||||
|
||||
ARG PROJECT
|
||||
|
||||
# Copy the actual binary from the regular directory
|
||||
COPY --from=builder /output/${PROJECT} /${PROJECT}
|
@ -1,22 +1,52 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Get script directory - handle different execution contexts
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
PROJECT="dehydrate"
|
||||
PROJECT="$(basename "${SCRIPT_DIR}")"
|
||||
|
||||
export CMAKE_BUILD_TYPE="Debug"
|
||||
# Debug output for CI
|
||||
echo "${PROJECT} build script running from: ${SCRIPT_DIR}"
|
||||
|
||||
rm -rf "${SCRIPT_DIR}/output"
|
||||
mkdir -p "${SCRIPT_DIR}/output"
|
||||
# handle running locally, or docker in docker via gitea runner.
|
||||
if [ -n "${GITEA_CONTAINER_NAME:-}" ]; then
|
||||
echo "We're in a gitea container: ${GITEA_CONTAINER_NAME}"
|
||||
VOLUME_OPTS=("--volumes-from=${GITEA_CONTAINER_NAME}")
|
||||
WORKING_DIR=("-w" "${GITHUB_WORKSPACE}/${PROJECT}")
|
||||
BUILD_DIR="${GITHUB_WORKSPACE}/${PROJECT}/build"
|
||||
OUTPUT_DIR="${GITHUB_WORKSPACE}/${PROJECT}/output"
|
||||
else
|
||||
VOLUME_OPTS=("-v" "${SCRIPT_DIR}:/app")
|
||||
WORKING_DIR=("-w" "/app")
|
||||
BUILD_DIR="${SCRIPT_DIR}/build"
|
||||
OUTPUT_DIR="${SCRIPT_DIR}/output"
|
||||
fi
|
||||
|
||||
# make sure we have the latest base image.
|
||||
docker pull gitea.jde.nz/public/dropshell-build-base:latest
|
||||
# Create output directory
|
||||
mkdir -p "${OUTPUT_DIR}"
|
||||
|
||||
docker build \
|
||||
-t "${PROJECT}-build" \
|
||||
-f "${SCRIPT_DIR}/Dockerfile.dropshell-build" \
|
||||
--build-arg PROJECT="${PROJECT}" \
|
||||
--build-arg CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \
|
||||
--output "${SCRIPT_DIR}/output" \
|
||||
"${SCRIPT_DIR}"
|
||||
# Run build in container with mounted directories
|
||||
COMMAND_TO_RUN="cmake -G Ninja -S . -B ./build \
|
||||
-DCMAKE_BUILD_TYPE=\${CMAKE_BUILD_TYPE} \
|
||||
-DPROJECT_NAME=${PROJECT} && \
|
||||
cmake --build ./build"
|
||||
|
||||
echo "Building in new docker container"
|
||||
docker run --rm \
|
||||
--user "$(id -u):$(id -g)" \
|
||||
"${VOLUME_OPTS[@]}" \
|
||||
"${WORKING_DIR[@]}" \
|
||||
-e CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE:-Debug}" \
|
||||
gitea.jde.nz/public/dropshell-build-base:latest \
|
||||
bash -c "${COMMAND_TO_RUN}"
|
||||
|
||||
# Copy built executable to output directory
|
||||
if [ -f "${BUILD_DIR}/${PROJECT}" ]; then
|
||||
cp "${BUILD_DIR}/${PROJECT}" "${OUTPUT_DIR}/"
|
||||
echo "✓ Build successful - ${PROJECT} copied to ${OUTPUT_DIR}/"
|
||||
else
|
||||
echo "✗ Build failed - ${PROJECT} not found in ${BUILD_DIR}/"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Build complete"
|
||||
|
18
dehydrate/clean.sh
Executable file
@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
PROJECT="$(basename "$(dirname "${SCRIPT_DIR}")")"
|
||||
|
||||
echo "Cleaning ${PROJECT}..."
|
||||
|
||||
# Remove output and build directories
|
||||
for dir in "output" "build"; do
|
||||
if [ -d "${SCRIPT_DIR}/${dir}" ]; then
|
||||
echo "Removing ${dir} directory..."
|
||||
rm -rf "${SCRIPT_DIR:?}/${dir}"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "✓ ${PROJECT} cleaned successfully"
|
@ -35,14 +35,7 @@ heading "Building ${PROJECT}"
|
||||
|
||||
# build release version
|
||||
export CMAKE_BUILD_TYPE="Release"
|
||||
|
||||
docker build \
|
||||
-t "${PROJECT}-build" \
|
||||
-f "${SCRIPT_DIR}/Dockerfile.dropshell-build" \
|
||||
--build-arg PROJECT="${PROJECT}" \
|
||||
--build-arg CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \
|
||||
--output "${OUTPUT}" \
|
||||
"${SCRIPT_DIR}"
|
||||
"${SCRIPT_DIR}/build.sh"
|
||||
|
||||
[ -f "${OUTPUT}/${PROJECT}" ] || die "Build failed."
|
||||
|
||||
|
@ -4,8 +4,20 @@ set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
PROJECT="dehydrate"
|
||||
DEHYDRATE="${SCRIPT_DIR}/output/${PROJECT}"
|
||||
TEST_DIR="${SCRIPT_DIR}/test_temp"
|
||||
|
||||
# Handle running locally or in Gitea runner
|
||||
if [ -n "${GITEA_CONTAINER_NAME:-}" ]; then
|
||||
echo "Running in Gitea CI environment"
|
||||
echo "GITHUB_WORKSPACE: ${GITHUB_WORKSPACE}"
|
||||
echo "Current directory: $(pwd)"
|
||||
OUTPUT_DIR="${GITHUB_WORKSPACE}/dehydrate/output"
|
||||
TEST_DIR="${GITHUB_WORKSPACE}/dehydrate/test_temp"
|
||||
else
|
||||
OUTPUT_DIR="${SCRIPT_DIR}/output"
|
||||
TEST_DIR="${SCRIPT_DIR}/test_temp"
|
||||
fi
|
||||
|
||||
DEHYDRATE="${OUTPUT_DIR}/${PROJECT}"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
@ -45,10 +57,31 @@ mkdir -p "$TEST_DIR"
|
||||
|
||||
echo -e "${YELLOW}Running dehydrate tests...${NC}\n"
|
||||
|
||||
# Debug output
|
||||
echo "Looking for dehydrate at: $DEHYDRATE"
|
||||
echo "Workspace structure:"
|
||||
ls -la "${GITHUB_WORKSPACE}" 2>/dev/null || echo "Workspace not found"
|
||||
echo "Dehydrate directory contents:"
|
||||
ls -la "${GITHUB_WORKSPACE}/dehydrate" 2>/dev/null || echo "Dehydrate directory not found"
|
||||
echo "Output directory contents:"
|
||||
ls -la "$OUTPUT_DIR" 2>/dev/null || echo "Output directory not found"
|
||||
|
||||
# Check if dehydrate binary exists
|
||||
if [ ! -f "$DEHYDRATE" ]; then
|
||||
echo -e "${RED}Error: dehydrate binary not found at $DEHYDRATE${NC}"
|
||||
echo "Please run ./build.sh first to build dehydrate"
|
||||
|
||||
if [ -n "${GITEA_CONTAINER_NAME:-}" ]; then
|
||||
echo "Checking if build directory exists..."
|
||||
BUILD_DIR="${GITHUB_WORKSPACE}/dehydrate/build"
|
||||
if [ -d "$BUILD_DIR" ]; then
|
||||
echo "Build directory exists, checking contents:"
|
||||
ls -la "$BUILD_DIR"
|
||||
else
|
||||
echo "Build directory $BUILD_DIR does not exist"
|
||||
fi
|
||||
fi
|
||||
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
@ -4,7 +4,7 @@
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
PROJECT_DIR="$( cd "$SCRIPT_DIR/.." && pwd )"
|
||||
|
||||
cd "$SCRIPT_DIR"
|
||||
cd "$SCRIPT_DIR" || exit 1
|
||||
|
||||
# Clean up old test data and any existing binaries
|
||||
# Force removal with chmod to handle permission issues
|
||||
|
@ -36,13 +36,16 @@ target_include_directories(${PROJECT_NAME} PRIVATE
|
||||
src/common)
|
||||
|
||||
# Find packages
|
||||
find_package(OpenSSL REQUIRED)
|
||||
find_package(Drogon CONFIG REQUIRED)
|
||||
find_package(nlohmann_json REQUIRED)
|
||||
|
||||
# Add module path for FindCPRStatic
|
||||
list(APPEND CMAKE_MODULE_PATH "/usr/local/share/cmake/Modules")
|
||||
|
||||
# Find packages
|
||||
find_package(nlohmann_json REQUIRED)
|
||||
find_package(CPRStatic REQUIRED)
|
||||
|
||||
# Link libraries
|
||||
target_link_libraries(${PROJECT_NAME} PRIVATE
|
||||
nlohmann_json::nlohmann_json Drogon::Drogon
|
||||
/usr/local/lib/libpgcommon.a /usr/local/lib/libpgport.a
|
||||
lzma dl)
|
||||
|
||||
nlohmann_json::nlohmann_json
|
||||
cpr::cpr_static)
|
@ -1,83 +0,0 @@
|
||||
ARG IMAGE_TAG
|
||||
FROM gitea.jde.nz/public/dropshell-build-base:latest AS builder
|
||||
|
||||
ARG PROJECT
|
||||
ARG CMAKE_BUILD_TYPE=Debug
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Create cache directories
|
||||
RUN mkdir -p /ccache
|
||||
|
||||
# Set up ccache
|
||||
ENV CCACHE_DIR=/ccache
|
||||
ENV CCACHE_COMPILERCHECK=content
|
||||
ENV CCACHE_MAXSIZE=2G
|
||||
|
||||
# Copy only build files first (for better layer caching)
|
||||
COPY CMakeLists.txt cmake_prebuild.sh ./
|
||||
COPY src/version.hpp.in src/
|
||||
|
||||
# Run prebuild script early (this rarely changes)
|
||||
RUN bash cmake_prebuild.sh
|
||||
|
||||
# Copy source files (this invalidates cache when source changes)
|
||||
COPY src/ src/
|
||||
|
||||
# Configure project (this step is cached unless CMakeLists.txt changes)
|
||||
RUN --mount=type=cache,target=/ccache \
|
||||
--mount=type=cache,target=/build \
|
||||
mkdir -p /build && \
|
||||
SSL_LIB=$(find /usr/local -name "libssl.a" | head -1) && \
|
||||
CRYPTO_LIB=$(find /usr/local -name "libcrypto.a" | head -1) && \
|
||||
echo "Found SSL: $SSL_LIB, Crypto: $CRYPTO_LIB" && \
|
||||
cmake -G Ninja -S /app -B /build \
|
||||
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
|
||||
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
|
||||
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
|
||||
-DCMAKE_EXE_LINKER_FLAGS="-fuse-ld=mold -static -g" \
|
||||
-DCMAKE_CXX_FLAGS="-g -fno-omit-frame-pointer" \
|
||||
-DCMAKE_C_FLAGS="-g -fno-omit-frame-pointer" \
|
||||
-DPROJECT_NAME="${PROJECT}" \
|
||||
-DCMAKE_STRIP=OFF \
|
||||
-DOPENSSL_SSL_LIBRARY="$SSL_LIB" \
|
||||
-DOPENSSL_CRYPTO_LIBRARY="$CRYPTO_LIB" \
|
||||
-DOPENSSL_INCLUDE_DIR=/usr/local/include \
|
||||
${CMAKE_TOOLCHAIN_FILE:+-DCMAKE_TOOLCHAIN_FILE=$CMAKE_TOOLCHAIN_FILE}
|
||||
|
||||
# Run prebuild script
|
||||
RUN --mount=type=cache,target=/ccache \
|
||||
--mount=type=cache,target=/build \
|
||||
cmake --build /build --target run_prebuild_script
|
||||
|
||||
# Build project (ccache will help here when only some files change)
|
||||
RUN --mount=type=cache,target=/ccache \
|
||||
--mount=type=cache,target=/build \
|
||||
cmake --build /build
|
||||
|
||||
# Copy the built executable to a regular directory for the final stage
|
||||
RUN --mount=type=cache,target=/build \
|
||||
mkdir -p /output && \
|
||||
find /build -type f -executable -name "*${PROJECT}*" -exec cp {} /output/${PROJECT} \; || \
|
||||
find /build -type f -executable -exec cp {} /output/${PROJECT} \;
|
||||
|
||||
|
||||
# if we're a release build, then run upx on the binary.
|
||||
RUN if [ "${CMAKE_BUILD_TYPE}" = "Release" ]; then \
|
||||
upx /output/${PROJECT}; \
|
||||
fi
|
||||
|
||||
# Final stage that only contains the binary
|
||||
FROM scratch AS project
|
||||
|
||||
ARG PROJECT
|
||||
|
||||
# Copy CA certificates for SSL validation
|
||||
#COPY --from=builder /etc/ssl/certs/ /etc/ssl/certs/
|
||||
|
||||
# Copy the actual binary from the regular directory
|
||||
COPY --from=builder /output/${PROJECT} /${PROJECT}
|
||||
|
207
getpkg/README.md
Normal file
@ -0,0 +1,207 @@
|
||||
# getpkg - Package Manager for Dropshell Tools
|
||||
|
||||
getpkg is a command-line package manager that simplifies tool installation, management, and publishing for the dropshell ecosystem. Tools are installed to `~/.getpkg/` with executable symlinks in `~/.local/bin/getpkg/` and automatically added to your PATH with bash completion.
|
||||
|
||||
## Installation
|
||||
|
||||
Install getpkg with a single command:
|
||||
|
||||
```bash
|
||||
curl https://getbin.xyz/getpkg-install | bash
|
||||
```
|
||||
|
||||
After installation, restart your shell or run `source ~/.bashrc` to enable the new PATH and completion settings.
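
Once the shell has been re-sourced, a quick sanity check (illustrative; `getpkg version` is documented under Available Commands below):

```bash
# Verify getpkg resolves from PATH and responds
command -v getpkg
getpkg version
```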
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Installing Tools
|
||||
|
||||
Install any tool from the getpkg registry:
|
||||
|
||||
```bash
|
||||
# Install a tool
|
||||
getpkg install whatsdirty
|
||||
```
|
||||
|
||||
### Managing Installed Tools
|
||||
|
||||
```bash
|
||||
# List all available commands
|
||||
getpkg help
|
||||
|
||||
# Update all installed tools
|
||||
getpkg update
|
||||
|
||||
# Uninstall a tool
|
||||
getpkg uninstall whatsdirty
|
||||
|
||||
# Check getpkg version
|
||||
getpkg version
|
||||
```
|
||||
|
||||
## Available Commands
|
||||
|
||||
### Core Package Management
|
||||
|
||||
- **`getpkg install <tool_name>`** - Install or update a tool
|
||||
- **`getpkg uninstall <tool_name>`** - Remove an installed tool
|
||||
- **`getpkg update`** - Update getpkg and all installed tools
|
||||
|
||||
### Publishing (Requires SOS_WRITE_TOKEN)
|
||||
|
||||
- **`getpkg publish <tool_name[:ARCH]> <folder>`** - Upload a tool to getpkg.xyz
|
||||
- **`getpkg unpublish <tool_name[:ARCH]>`** - Remove a published tool
|
||||
- **`getpkg unpublish <hash>`** - Remove a published tool by hash
|
||||
|
||||
### Development Tools
|
||||
|
||||
- **`getpkg create <tool_name> <directory>`** - Create a new tool project
|
||||
- **`getpkg hash <file_or_directory>`** - Calculate hash of files/directories
|
||||
|
||||
### Information
|
||||
|
||||
- **`getpkg list`** - List all available packages with status
|
||||
- **`getpkg clean`** - Clean up orphaned configs and symlinks
|
||||
- **`getpkg version`** - Show getpkg version
|
||||
- **`getpkg help`** - Show detailed help
|
||||
- **`getpkg autocomplete`** - Show available commands for completion
|
||||
|
||||
## How It Works
|
||||
|
||||
### Installation Process
|
||||
|
||||
When you install a tool, getpkg:

1. **Downloads** the tool archive from getpkg.xyz
2. **Extracts** it to `~/.getpkg/<tool_name>/`
3. **Creates symlinks** for all executables in `~/.local/bin/getpkg/`
4. **Ensures PATH** includes `~/.local/bin/getpkg` (one-time setup)
5. **Enables bash completion** for the tool
6. **Runs setup** if a `setup_script.sh` exists
7. **Stores metadata** in `~/.config/getpkg/<tool_name>.json`
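
The steps above are roughly equivalent to the following shell sketch (illustrative only — the real client implements this in C++; bash-completion wiring is omitted and the metadata fields written in the last step are assumptions):

```bash
#!/bin/bash
# Rough, hand-written equivalent of one getpkg install (not the real implementation).
TOOL=whatsdirty
ARCH=$(uname -m)                                   # e.g. x86_64 or aarch64
mkdir -p ~/.getpkg/"$TOOL" ~/.local/bin/getpkg ~/.config/getpkg
curl -fsSL "https://getpkg.xyz/object/${TOOL}:${ARCH}" -o "/tmp/${TOOL}.tgz"       # 1. download
tar -xzf "/tmp/${TOOL}.tgz" -C ~/.getpkg/"$TOOL"                                   # 2. extract
find ~/.getpkg/"$TOOL" -maxdepth 1 -type f -executable \
    -exec ln -sf {} ~/.local/bin/getpkg/ \;                                        # 3. symlink executables
grep -q '\.bashrc_getpkg' ~/.bashrc || echo 'source ~/.bashrc_getpkg' >> ~/.bashrc # 4. one-time PATH setup
[ -f ~/.getpkg/"$TOOL"/setup_script.sh ] && bash ~/.getpkg/"$TOOL"/setup_script.sh # 6. optional setup hook
echo "{\"name\":\"$TOOL\",\"arch\":\"$ARCH\"}" > ~/.config/getpkg/"$TOOL".json     # 7. metadata (fields assumed)
```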
|
||||
|
||||
### Architecture Support

getpkg supports multiple architectures:

- `x86_64` (Intel/AMD 64-bit)
- `aarch64` (ARM 64-bit)
- `universal` (cross-platform tools)

Tools are automatically downloaded for your architecture, with fallback to universal versions.
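
A minimal sketch of that fallback, assuming the `/object/<tool>:<arch>` endpoint used elsewhere in this changeset (the real client's retry behaviour may differ):

```bash
# Try the native architecture first, then fall back to the universal build
TOOL=mytool
ARCH=$(uname -m)   # x86_64 or aarch64
if ! curl -fsSL "https://getpkg.xyz/object/${TOOL}:${ARCH}" -o "${TOOL}.tgz"; then
    echo "No ${ARCH} build found, trying universal..."
    curl -fsSL "https://getpkg.xyz/object/${TOOL}:universal" -o "${TOOL}.tgz"
fi
```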
|
||||
|
||||
### File Locations
|
||||
|
||||
- **Tool files**: `~/.getpkg/<tool_name>/` (actual tool installation)
|
||||
- **Executable symlinks**: `~/.local/bin/getpkg/` (in your PATH)
|
||||
- **Configuration**: `~/.config/getpkg/`
|
||||
- **PATH setup**: `~/.bashrc_getpkg` (sourced by `~/.bashrc`)
|
||||
|
||||
## Examples
|
||||
|
||||
### Installing Popular Tools
|
||||
|
||||
```bash
|
||||
# Install available tools
|
||||
getpkg install dehydrate # File-to-C++ code generator
|
||||
getpkg install bb64 # Bash base64 encoder/decoder
|
||||
|
||||
# Development tools (for repository development)
|
||||
getpkg install whatsdirty # Check git repo status
|
||||
getpkg install sos # Simple object storage client
|
||||
getpkg install gp # Git push utility
|
||||
```
|
||||
|
||||
### Publishing Your Own Tools
|
||||
|
||||
```bash
|
||||
# Set your publishing token
|
||||
export SOS_WRITE_TOKEN="your-token-here"
|
||||
|
||||
# Create a new tool project
|
||||
getpkg create mytool ./mytool-project
|
||||
|
||||
# Publish architecture-specific build
|
||||
getpkg publish mytool:x86_64 ./build/
|
||||
|
||||
# Publish universal tool
|
||||
getpkg publish mytool ./build/
|
||||
|
||||
# Remove published tool
|
||||
getpkg unpublish mytool:x86_64
|
||||
```
|
||||
|
||||
### Development Workflow
|
||||
|
||||
```bash
|
||||
# Create tool structure
|
||||
getpkg create awesome-tool ./awesome-tool
|
||||
cd awesome-tool
|
||||
|
||||
# Build your tool...
|
||||
# Add executable to the directory
|
||||
|
||||
# Test locally
|
||||
./awesome-tool --version
|
||||
|
||||
# Publish when ready
|
||||
getpkg publish awesome-tool:x86_64 .
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- **`SOS_WRITE_TOKEN`** - Authentication token for publishing tools
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Tool Not Found
|
||||
If a tool isn't found after installation, ensure your shell has loaded the new PATH:
|
||||
```bash
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
### Permission Issues
|
||||
getpkg installs to your home directory and doesn't require root access. If you encounter permission issues, check that `~/.local/bin/` is writable.
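
For example, a quick writability check:

```bash
# Both locations should exist (or be creatable) and be writable by your user
for d in "$HOME/.local/bin" "$HOME/.getpkg"; do
    mkdir -p "$d" 2>/dev/null
    [ -w "$d" ] && echo "OK: $d" || echo "Not writable: $d"
done
```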
|
||||
|
||||
### Network Issues
|
||||
All tools are downloaded from `getpkg.xyz`. Ensure you have internet connectivity and the domain is accessible.
|
||||
|
||||
## Development
|
||||
|
||||
### Building getpkg
|
||||
|
||||
```bash
|
||||
# Build debug version
|
||||
cd getpkg && ./build.sh
|
||||
|
||||
# Run tests
|
||||
cd getpkg && ./test.sh
|
||||
|
||||
# Publish (requires SOS_WRITE_TOKEN)
|
||||
cd getpkg && ./publish.sh
|
||||
```
|
||||
|
||||
### Tool Development
|
||||
|
||||
When creating tools for getpkg:

1. Create a directory with your tool binary
2. Optionally include a `setup_script.sh` for post-install setup
3. The tool should support `version` and `autocomplete` subcommands
4. Use `getpkg publish` to upload to the registry
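
A minimal tool skeleton that satisfies those points might look like this (names are illustrative; only the `version`/`autocomplete` convention and the optional `setup_script.sh` come from the list above):

```bash
#!/bin/bash
# mytool — stub showing the subcommand convention getpkg expects.
# Layout published with `getpkg publish mytool ./mytool-project`:
#   mytool-project/
#   ├── mytool            (this script or a compiled binary)
#   └── setup_script.sh   (optional post-install hook)
case "${1:-}" in
    version)      echo "1.0.0" ;;
    autocomplete) printf '%s\n' version autocomplete ;;   # names offered to bash completion
    *)            echo "mytool: doing the actual work" ;;
esac
```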
|
||||
|
||||
### Testing
|
||||
|
||||
The test script creates all temporary files and directories in `test_temp/` to keep the main directory clean:
|
||||
|
||||
```bash
|
||||
# Run tests
|
||||
./test.sh
|
||||
|
||||
# Clean up orphaned test files from old test runs (one-time)
|
||||
bash cleanup_old_test_files.sh
|
||||
|
||||
# Clean up orphaned test packages from getpkg.xyz
|
||||
bash cleanup_test_packages.sh
|
||||
```
|
||||
|
||||
For more details, see the development documentation in each tool's directory.
|
@ -1,25 +1,52 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Get script directory - handle different execution contexts
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
PROJECT="$(basename "${SCRIPT_DIR}")"
|
||||
|
||||
# Debug output for CI
|
||||
echo "${PROJECT} build script running from: ${SCRIPT_DIR}"
|
||||
|
||||
export CMAKE_BUILD_TYPE="Debug"
|
||||
# handle running locally, or docker in docker via gitea runner.
|
||||
if [ -n "${GITEA_CONTAINER_NAME:-}" ]; then
|
||||
echo "We're in a gitea container: ${GITEA_CONTAINER_NAME}"
|
||||
VOLUME_OPTS=("--volumes-from=${GITEA_CONTAINER_NAME}")
|
||||
WORKING_DIR=("-w" "${GITHUB_WORKSPACE}/${PROJECT}")
|
||||
BUILD_DIR="${GITHUB_WORKSPACE}/${PROJECT}/build"
|
||||
OUTPUT_DIR="${GITHUB_WORKSPACE}/${PROJECT}/output"
|
||||
else
|
||||
VOLUME_OPTS=("-v" "${SCRIPT_DIR}:/app")
|
||||
WORKING_DIR=("-w" "/app")
|
||||
BUILD_DIR="${SCRIPT_DIR}/build"
|
||||
OUTPUT_DIR="${SCRIPT_DIR}/output"
|
||||
fi
|
||||
|
||||
rm -rf "${SCRIPT_DIR}/output"
|
||||
mkdir -p "${SCRIPT_DIR}/output"
|
||||
# Create output directory
|
||||
mkdir -p "${OUTPUT_DIR}"
|
||||
|
||||
PROJECT="getpkg"
|
||||
# Run build in container with mounted directories
|
||||
COMMAND_TO_RUN="cmake -G Ninja -S . -B ./build \
|
||||
-DCMAKE_BUILD_TYPE=\${CMAKE_BUILD_TYPE} \
|
||||
-DPROJECT_NAME=${PROJECT} && \
|
||||
cmake --build ./build"
|
||||
|
||||
# make sure we have the latest base image.
|
||||
docker pull gitea.jde.nz/public/dropshell-build-base:latest
|
||||
echo "Building in new docker container"
|
||||
docker run --rm \
|
||||
--user "$(id -u):$(id -g)" \
|
||||
"${VOLUME_OPTS[@]}" \
|
||||
"${WORKING_DIR[@]}" \
|
||||
-e CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE:-Debug}" \
|
||||
gitea.jde.nz/public/dropshell-build-base:latest \
|
||||
bash -c "${COMMAND_TO_RUN}"
|
||||
|
||||
docker build \
|
||||
-t "${PROJECT}-build" \
|
||||
-f "${SCRIPT_DIR}/Dockerfile.dropshell-build" \
|
||||
--build-arg PROJECT="${PROJECT}" \
|
||||
--build-arg CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \
|
||||
--output "${SCRIPT_DIR}/output" \
|
||||
"${SCRIPT_DIR}"
|
||||
# Copy built executable to output directory
|
||||
if [ -f "${BUILD_DIR}/${PROJECT}" ]; then
|
||||
cp "${BUILD_DIR}/${PROJECT}" "${OUTPUT_DIR}/"
|
||||
echo "✓ Build successful - ${PROJECT} copied to ${OUTPUT_DIR}/"
|
||||
else
|
||||
echo "✗ Build failed - ${PROJECT} not found in ${BUILD_DIR}/"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Build complete"
|
||||
|
18
getpkg/clean.sh
Executable file
@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
PROJECT="$(basename "$(dirname "${SCRIPT_DIR}")")"
|
||||
|
||||
echo "Cleaning ${PROJECT}..."
|
||||
|
||||
# Remove output and build directories
|
||||
for dir in "output" "build"; do
|
||||
if [ -d "${SCRIPT_DIR}/${dir}" ]; then
|
||||
echo "Removing ${dir} directory..."
|
||||
rm -rf "${SCRIPT_DIR:?}/${dir}"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "✓ ${PROJECT} cleaned successfully"
|
98
getpkg/cleanup_test_packages.sh
Executable file
@ -0,0 +1,98 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Cleanup script for orphaned test packages from getpkg testing
|
||||
# This script removes test packages that start with "test-" from getpkg.xyz
|
||||
# Run from the getpkg directory: bash cleanup_test_packages.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
GETPKG="./output/getpkg"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
echo -e "${YELLOW}Cleaning up orphaned test packages...${NC}"
|
||||
|
||||
# Check if getpkg binary exists
|
||||
if [ ! -f "$GETPKG" ]; then
|
||||
echo -e "${RED}Error: getpkg binary not found at $GETPKG${NC}"
|
||||
echo "Please run ./build.sh first to build getpkg"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if SOS_WRITE_TOKEN is set
|
||||
if [ -z "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
echo -e "${RED}Error: SOS_WRITE_TOKEN environment variable is not set${NC}"
|
||||
echo "This token is required to unpublish packages from getpkg.xyz"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Using getpkg binary: $GETPKG"
|
||||
echo "SOS_WRITE_TOKEN is set (${#SOS_WRITE_TOKEN} characters)"
|
||||
|
||||
# Get list of all packages from /dir endpoint
|
||||
echo "Fetching package list from getpkg.xyz/dir..."
|
||||
DIR_RESPONSE=$(curl -s "https://getpkg.xyz/dir" 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$DIR_RESPONSE" ]; then
|
||||
echo -e "${RED}Failed to fetch package list from server${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract test package labeltags from JSON response
|
||||
# Try with jq first, fallback to grep/sed if jq is not available
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | jq -r '.entries[]?.labeltags[]? // empty' 2>/dev/null | grep "^test-" | sort -u || echo "")
|
||||
else
|
||||
# Fallback: extract labeltags using grep and sed (less reliable but works without jq)
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | grep -o '"test-[^"]*"' | sed 's/"//g' | sort -u || echo "")
|
||||
fi
|
||||
|
||||
if [ -z "$TEST_PACKAGES" ]; then
|
||||
echo -e "${GREEN}No test packages found to clean up${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -e "\n${YELLOW}Found test packages to clean up:${NC}"
|
||||
echo "$TEST_PACKAGES" | while read -r package; do
|
||||
echo " - $package"
|
||||
done
|
||||
|
||||
echo -e "\n${YELLOW}Cleaning up test packages...${NC}"
|
||||
|
||||
CLEANED_COUNT=0
|
||||
FAILED_COUNT=0
|
||||
|
||||
# Use process substitution to avoid subshell issues
|
||||
while IFS= read -r package; do
|
||||
if [ -n "$package" ]; then
|
||||
echo -n "Cleaning up $package... "
|
||||
|
||||
# Try to unpublish the package (temporarily disable set -e)
|
||||
set +e
|
||||
$GETPKG unpublish "$package" >/dev/null 2>&1
|
||||
UNPUBLISH_RESULT=$?
|
||||
set -e
|
||||
|
||||
if [ $UNPUBLISH_RESULT -eq 0 ]; then
|
||||
echo -e "${GREEN}OK${NC}"
|
||||
((CLEANED_COUNT++))
|
||||
else
|
||||
echo -e "${RED}FAILED${NC}"
|
||||
((FAILED_COUNT++))
|
||||
fi
|
||||
fi
|
||||
done <<< "$TEST_PACKAGES"
|
||||
|
||||
echo -e "\n${YELLOW}Cleanup Summary:${NC}"
|
||||
echo "Packages cleaned: $CLEANED_COUNT"
|
||||
echo "Failed cleanups: $FAILED_COUNT"
|
||||
|
||||
if [ $FAILED_COUNT -eq 0 ]; then
|
||||
echo -e "${GREEN}All test packages cleaned up successfully!${NC}"
|
||||
else
|
||||
echo -e "${YELLOW}Some packages failed to clean up. They may need manual removal.${NC}"
|
||||
fi
|
@ -34,15 +34,7 @@ heading "Building ${PROJECT}"
|
||||
|
||||
# build release version
|
||||
export CMAKE_BUILD_TYPE="Release"
|
||||
|
||||
docker build \
|
||||
-t "${PROJECT}-build" \
|
||||
-f "${SCRIPT_DIR}/Dockerfile.dropshell-build" \
|
||||
--build-arg PROJECT="${PROJECT}" \
|
||||
--build-arg CMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \
|
||||
--output "${OUTPUT}" \
|
||||
"${SCRIPT_DIR}"
|
||||
|
||||
"${SCRIPT_DIR}/build.sh"
|
||||
[ -f "${OUTPUT}/${PROJECT}" ] || die "Build failed."
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
|
@ -1,530 +1,410 @@
|
||||
#include "GetbinClient.hpp"
|
||||
#include <drogon/HttpClient.h>
|
||||
#include <trantor/net/EventLoop.h>
|
||||
#include <openssl/ssl.h>
|
||||
#include <openssl/opensslconf.h>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <cpr/cpr.h>
|
||||
#include <nlohmann/json.hpp>
|
||||
#include <string>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
#include <chrono>
|
||||
#include <cstdio>
|
||||
#include <map>
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <condition_variable>
|
||||
#include <vector>
|
||||
#include <ctime>
|
||||
#include <algorithm>
|
||||
#include <filesystem>
|
||||
#include <sstream>
|
||||
#include <set>
|
||||
#include <algorithm>
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
static constexpr const char* SERVER_HOST = "getpkg.xyz";
|
||||
const std::string GetbinClient::DEFAULT_SERVER_HOST = "getpkg.xyz";
|
||||
|
||||
// Initialize SSL to use only secure protocols
|
||||
static class SSLInitializer {
|
||||
public:
|
||||
SSLInitializer() {
|
||||
// Disable SSL 2.0, 3.0, TLS 1.0, and TLS 1.1
|
||||
SSL_load_error_strings();
|
||||
SSL_library_init();
|
||||
// Note: This doesn't completely silence the warning but ensures we're using secure protocols
|
||||
GetbinClient::GetbinClient(const std::vector<std::string>& servers) : servers_(servers) {
|
||||
// Initialize CPR (done automatically, but we could add global config here)
|
||||
if (servers_.empty()) {
|
||||
servers_.push_back(DEFAULT_SERVER_HOST);
|
||||
}
|
||||
} ssl_init;
|
||||
|
||||
static std::string find_ca_certificates() {
|
||||
// Common CA certificate locations across different Linux distributions
|
||||
const std::vector<std::string> ca_paths = {
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Raspbian
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL/CentOS
|
||||
"/etc/ssl/ca-bundle.pem", // OpenSUSE
|
||||
"/etc/pki/tls/cert.pem", // Fedora/RHEL alternative
|
||||
"/etc/ssl/certs/ca-bundle.crt", // Some distros
|
||||
"/etc/ssl/cert.pem", // Alpine Linux
|
||||
"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD
|
||||
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7+
|
||||
"/etc/ca-certificates/extracted/tls-ca-bundle.pem" // Arch Linux
|
||||
};
|
||||
|
||||
for (const auto& path : ca_paths) {
|
||||
std::ifstream file(path);
|
||||
if (file.good()) {
|
||||
file.close();
|
||||
return path;
|
||||
}
|
||||
}
|
||||
|
||||
return "";
|
||||
}
|
||||
|
||||
GetbinClient::GetbinClient() {}
|
||||
GetbinClient::GetbinClient() : servers_({DEFAULT_SERVER_HOST}) {
|
||||
// Backward compatibility constructor
|
||||
}
|
||||
|
||||
bool GetbinClient::download(const std::string& toolName, const std::string& arch, const std::string& outPath) {
|
||||
bool success = false;
|
||||
bool done = false;
|
||||
std::mutex mtx;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread worker([&]() {
|
||||
trantor::EventLoop loop;
|
||||
std::string GetbinClient::getUserAgent() const {
|
||||
return "getpkg/1.0";
|
||||
}
|
||||
|
||||
std::string GetbinClient::buildUrl(const std::string& serverUrl, const std::string& endpoint) const {
|
||||
std::string url = "https://" + serverUrl;
|
||||
if (!endpoint.empty() && endpoint[0] != '/') {
|
||||
url += "/";
|
||||
}
|
||||
url += endpoint;
|
||||
return url;
|
||||
}
|
||||
|
||||
bool GetbinClient::downloadFromServer(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback) {
|
||||
try {
|
||||
std::string url = buildUrl(serverUrl, "/object/" + toolName + ":" + arch);
|
||||
|
||||
auto client = drogon::HttpClient::newHttpClient(
|
||||
"https://" + std::string(SERVER_HOST),
|
||||
&loop,
|
||||
false, // useOldTLS = false (disable TLS 1.0/1.1)
|
||||
true // validateCert = true
|
||||
);
|
||||
cpr::Session session;
|
||||
session.SetUrl(cpr::Url{url});
|
||||
session.SetHeader(cpr::Header{{"User-Agent", getUserAgent()}});
|
||||
session.SetTimeout(cpr::Timeout{30000}); // 30 seconds
|
||||
session.SetVerifySsl(cpr::VerifySsl{true});
|
||||
|
||||
// Configure SSL certificates for HTTPS
|
||||
std::string ca_path = find_ca_certificates();
|
||||
if (!ca_path.empty()) {
|
||||
// Use addSSLConfigs with proper parameter names for OpenSSL
|
||||
std::vector<std::pair<std::string, std::string>> sslConfigs;
|
||||
sslConfigs.push_back({"VerifyCAFile", ca_path});
|
||||
client->addSSLConfigs(sslConfigs);
|
||||
} else {
|
||||
// If no CA certificates found, print warning but continue
|
||||
std::cerr << "[GetbinClient] Warning: No system CA certificates found. SSL verification may fail." << std::endl;
|
||||
// Add progress callback if provided
|
||||
if (progressCallback) {
|
||||
session.SetProgressCallback(cpr::ProgressCallback{[progressCallback](cpr::cpr_off_t downloadTotal, cpr::cpr_off_t downloadNow,
|
||||
cpr::cpr_off_t uploadTotal, cpr::cpr_off_t uploadNow,
|
||||
intptr_t userdata) -> bool {
|
||||
return progressCallback(static_cast<size_t>(downloadNow), static_cast<size_t>(downloadTotal));
|
||||
}});
|
||||
}
|
||||
|
||||
client->enableCookies();
|
||||
client->setUserAgent("getpkg/1.0");
|
||||
auto response = session.Get();
|
||||
|
||||
std::string object_path = "/object/" + toolName + ":" + arch;
|
||||
|
||||
auto req = drogon::HttpRequest::newHttpRequest();
|
||||
req->setMethod(drogon::Get);
|
||||
req->setPath(object_path);
|
||||
|
||||
client->sendRequest(req, [&](drogon::ReqResult result, const drogon::HttpResponsePtr& response) {
|
||||
std::lock_guard<std::mutex> lock(mtx);
|
||||
if (result == drogon::ReqResult::Ok && response && response->getStatusCode() == drogon::k200OK) {
|
||||
std::ofstream ofs(outPath, std::ios::binary);
|
||||
if (ofs) {
|
||||
const auto& body = response->getBody();
|
||||
ofs.write(body.data(), body.size());
|
||||
success = ofs.good();
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::download] HTTP request failed." << std::endl;
|
||||
if (response.status_code == 200) {
|
||||
std::ofstream ofs(outPath, std::ios::binary);
|
||||
if (ofs) {
|
||||
ofs.write(response.text.data(), response.text.size());
|
||||
return ofs.good();
|
||||
}
|
||||
done = true;
|
||||
cv.notify_one();
|
||||
loop.quit();
|
||||
}, 30.0); // 30 second timeout
|
||||
} else if (response.status_code == 404) {
|
||||
// Not found - this is expected for arch fallback
|
||||
return false;
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::downloadFromServer] HTTP " << response.status_code << " from " << serverUrl << ": " << response.error.message << std::endl;
|
||||
}
|
||||
|
||||
loop.loop();
|
||||
});
|
||||
|
||||
// Wait for completion
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mtx);
|
||||
cv.wait(lock, [&] { return done; });
|
||||
}
|
||||
|
||||
worker.join();
|
||||
return success;
|
||||
}
|
||||
|
||||
bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, std::string& outHash, const std::string& token) {
|
||||
// Read file first
|
||||
std::ifstream ifs(archivePath, std::ios::binary);
|
||||
if (!ifs) {
|
||||
std::cerr << "[GetbinClient::upload] Failed to open archive file: " << archivePath << std::endl;
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::downloadFromServer] Exception with " << serverUrl << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
std::string file_content((std::istreambuf_iterator<char>(ifs)), std::istreambuf_iterator<char>());
|
||||
|
||||
// Compose metadata
|
||||
json metadata = { {"labeltags", json::array()} };
|
||||
std::string filename = archivePath.substr(archivePath.find_last_of("/\\") + 1);
|
||||
size_t dot = filename.find('.');
|
||||
std::string labeltag = dot != std::string::npos ? filename.substr(0, dot) : filename;
|
||||
metadata["labeltags"].push_back(labeltag);
|
||||
|
||||
bool success = false;
|
||||
bool done = false;
|
||||
std::mutex mtx;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread worker([&]() {
|
||||
trantor::EventLoop loop;
|
||||
|
||||
auto client = drogon::HttpClient::newHttpClient(
|
||||
"https://" + std::string(SERVER_HOST),
|
||||
&loop,
|
||||
false, // useOldTLS = false (disable TLS 1.0/1.1)
|
||||
true // validateCert = true
|
||||
);
|
||||
|
||||
// Configure SSL certificates
|
||||
std::string ca_path = find_ca_certificates();
|
||||
std::vector<std::pair<std::string, std::string>> sslConfigs;
|
||||
if (!ca_path.empty()) {
|
||||
sslConfigs.push_back({"VerifyCAFile", ca_path});
|
||||
}
|
||||
|
||||
bool GetbinClient::download(const std::string& toolName, const std::string& arch, const std::string& outPath,
|
||||
ProgressCallback progressCallback) {
|
||||
// Multi-server fallback logic: try each server in order
|
||||
for (const auto& server : servers_) {
|
||||
if (downloadFromServer(server, toolName, arch, outPath, progressCallback)) {
|
||||
return true;
|
||||
}
|
||||
// Configure SSL for secure connections
|
||||
client->addSSLConfigs(sslConfigs);
|
||||
|
||||
if (ca_path.empty()) {
|
||||
std::cerr << "[GetbinClient] Warning: No system CA certificates found. SSL verification may fail." << std::endl;
|
||||
}
|
||||
|
||||
client->enableCookies();
|
||||
client->setUserAgent("getpkg/1.0");
|
||||
|
||||
// Create upload file from memory content
|
||||
// First save content to a temporary file since UploadFile expects a file path
|
||||
std::string temp_file = "/tmp/getpkg_upload_" + std::to_string(std::time(nullptr)) + ".tgz";
|
||||
std::ofstream temp_ofs(temp_file, std::ios::binary);
|
||||
if (!temp_ofs) {
|
||||
std::cerr << "[GetbinClient::upload] Failed to create temporary file: " << temp_file << std::endl;
|
||||
success = false;
|
||||
done = true;
|
||||
cv.notify_one();
|
||||
loop.quit();
|
||||
return;
|
||||
}
|
||||
temp_ofs.write(file_content.data(), file_content.size());
|
||||
temp_ofs.close();
|
||||
|
||||
// Create upload request with file
|
||||
drogon::UploadFile upload_file(temp_file);
|
||||
|
||||
auto req = drogon::HttpRequest::newFileUploadRequest({upload_file});
|
||||
req->setMethod(drogon::Put);
|
||||
req->setPath("/upload");
|
||||
req->addHeader("Authorization", "Bearer " + token);
|
||||
|
||||
// Add metadata as form parameter
|
||||
req->setParameter("metadata", metadata.dump());
|
||||
|
||||
client->sendRequest(req, [&](drogon::ReqResult result, const drogon::HttpResponsePtr& response) {
|
||||
std::lock_guard<std::mutex> lock(mtx);
|
||||
if (result == drogon::ReqResult::Ok && response) {
|
||||
int status_code = static_cast<int>(response->getStatusCode());
|
||||
std::string response_body(response->getBody());
|
||||
|
||||
if (status_code == 200 || status_code == 201) {
|
||||
try {
|
||||
auto resp_json = json::parse(response_body);
|
||||
if (resp_json.contains("url")) outUrl = resp_json["url"].get<std::string>();
|
||||
if (resp_json.contains("hash")) outHash = resp_json["hash"].get<std::string>();
|
||||
success = true;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::upload] Failed to parse JSON response: " << e.what() << std::endl;
|
||||
std::cerr << "[GetbinClient::upload] Response body: " << response_body << std::endl;
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::upload] HTTP error: status code " << status_code << std::endl;
|
||||
std::cerr << "[GetbinClient::upload] Response body: " << response_body << std::endl;
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::upload] HTTP /upload request failed." << std::endl;
|
||||
}
|
||||
done = true;
|
||||
cv.notify_one();
|
||||
loop.quit();
|
||||
}, 60.0); // 60 second timeout
|
||||
|
||||
loop.loop();
|
||||
|
||||
// Clean up temporary file
|
||||
std::remove(temp_file.c_str());
|
||||
});
|
||||
|
||||
// Wait for completion
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mtx);
|
||||
cv.wait(lock, [&] { return done; });
|
||||
}
|
||||
|
||||
worker.join();
|
||||
return success;
|
||||
// If we get here, no server had the package
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetbinClient::upload(const std::string& serverUrl, const std::string& archivePath,
|
||||
std::string& outUrl, std::string& outHash, const std::string& token,
|
||||
ProgressCallback progressCallback) {
|
||||
try {
|
||||
std::string url = buildUrl(serverUrl, "/upload");
|
||||
|
||||
cpr::Session session;
|
||||
session.SetUrl(cpr::Url{url});
|
||||
session.SetHeader(cpr::Header{
|
||||
{"User-Agent", getUserAgent()},
|
||||
{"Authorization", "Bearer " + token}
|
||||
});
|
||||
session.SetTimeout(cpr::Timeout{300000}); // 5 minutes for uploads
|
||||
session.SetVerifySsl(cpr::VerifySsl{true});
|
||||
|
||||
|
||||
// Extract tool name and arch from archive path for labeltags
|
||||
// Archive path format: /path/to/tool-name:arch.tgz or similar
|
||||
std::string archiveName = std::filesystem::path(archivePath).filename().string();
|
||||
std::string toolNameArch = archiveName;
|
||||
if (toolNameArch.ends_with(".tgz")) {
|
||||
toolNameArch = toolNameArch.substr(0, toolNameArch.length() - 4);
|
||||
}
|
||||
|
||||
// Create metadata JSON with labeltags
|
||||
json metadata;
|
||||
metadata["labeltags"] = json::array({toolNameArch});
|
||||
|
||||
// Set up multipart form with file and metadata
|
||||
session.SetMultipart(cpr::Multipart{
|
||||
cpr::Part{"file", cpr::File{archivePath}},
|
||||
cpr::Part{"metadata", metadata.dump(), "application/json"}
|
||||
});
|
||||
|
||||
// Add progress callback if provided
|
||||
if (progressCallback) {
|
||||
session.SetProgressCallback(cpr::ProgressCallback{[progressCallback](cpr::cpr_off_t downloadTotal, cpr::cpr_off_t downloadNow,
|
||||
cpr::cpr_off_t uploadTotal, cpr::cpr_off_t uploadNow,
|
||||
intptr_t userdata) -> bool {
|
||||
return progressCallback(static_cast<size_t>(uploadNow), static_cast<size_t>(uploadTotal));
|
||||
}});
|
||||
}
|
||||
|
||||
auto response = session.Put();
|
||||
|
||||
if (response.status_code == 200) {
|
||||
try {
|
||||
auto resp_json = json::parse(response.text);
|
||||
if (resp_json.contains("hash") && resp_json.contains("result") && resp_json["result"] == "success") {
|
||||
outUrl = buildUrl(serverUrl, "/object/" + resp_json["hash"].get<std::string>());
|
||||
outHash = resp_json["hash"].get<std::string>();
|
||||
return true;
|
||||
}
|
||||
} catch (const json::exception& e) {
|
||||
// Try to extract from plain text response
|
||||
outUrl = "";
|
||||
outHash = response.text;
|
||||
// Remove trailing newline if present
|
||||
if (!outHash.empty() && outHash.back() == '\n') {
|
||||
outHash.pop_back();
|
||||
}
|
||||
return !outHash.empty();
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::upload] HTTP " << response.status_code << " to " << serverUrl << ": " << response.error.message << std::endl;
|
||||
if (!response.text.empty()) {
|
||||
std::cerr << "[GetbinClient::upload] Response: " << response.text << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::upload] Exception with " << serverUrl << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool GetbinClient::upload(const std::string& archivePath, std::string& outUrl, std::string& outHash,
|
||||
const std::string& token, ProgressCallback progressCallback) {
|
||||
// Backward compatibility: use first server
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
return upload(servers_[0], archivePath, outUrl, outHash, token, progressCallback);
|
||||
}
|
||||
|
||||
bool GetbinClient::getHash(const std::string& serverUrl, const std::string& toolName,
|
||||
const std::string& arch, std::string& outHash) {
|
||||
try {
|
||||
std::string url = buildUrl(serverUrl, "/hash/" + toolName + ":" + arch);
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
cpr::Timeout{10000}, // 10 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
if (response.status_code == 200) {
|
||||
try {
|
||||
// Try JSON first
|
||||
auto resp_json = json::parse(response.text);
|
||||
if (resp_json.contains("hash")) {
|
||||
outHash = resp_json["hash"].get<std::string>();
|
||||
return true;
|
||||
}
|
||||
} catch (const json::exception&) {
|
||||
// Not JSON, treat as plain text
|
||||
outHash = response.text;
|
||||
// Remove trailing newline if present
|
||||
if (!outHash.empty() && outHash.back() == '\n') {
|
||||
outHash.pop_back();
|
||||
}
|
||||
return !outHash.empty();
|
||||
}
|
||||
} else if (response.status_code == 404) {
|
||||
// Not found - this is expected for non-existent tools/archs
|
||||
return false;
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::getHash] HTTP " << response.status_code << " from " << serverUrl << ": " << response.error.message << std::endl;
|
||||
}
|
||||
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::getHash] Exception with " << serverUrl << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool GetbinClient::getHash(const std::string& toolName, const std::string& arch, std::string& outHash) {
|
||||
bool success = false;
|
||||
bool done = false;
|
||||
std::mutex mtx;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread worker([&]() {
|
||||
trantor::EventLoop loop;
|
||||
|
||||
auto client = drogon::HttpClient::newHttpClient(
|
||||
"https://" + std::string(SERVER_HOST),
|
||||
&loop,
|
||||
false, // useOldTLS = false (disable TLS 1.0/1.1)
|
||||
true // validateCert = true
|
||||
);
|
||||
|
||||
// Configure SSL certificates
|
||||
std::string ca_path = find_ca_certificates();
|
||||
std::vector<std::pair<std::string, std::string>> sslConfigs;
|
||||
if (!ca_path.empty()) {
|
||||
sslConfigs.push_back({"VerifyCAFile", ca_path});
|
||||
// Multi-server fallback: try each server in order
|
||||
for (const auto& server : servers_) {
|
||||
if (getHash(server, toolName, arch, outHash)) {
|
||||
return true;
|
||||
}
|
||||
// Configure SSL for secure connections
|
||||
client->addSSLConfigs(sslConfigs);
|
||||
|
||||
if (ca_path.empty()) {
|
||||
std::cerr << "[GetbinClient] Warning: No system CA certificates found. SSL verification may fail." << std::endl;
|
||||
}
|
||||
|
||||
client->enableCookies();
|
||||
client->setUserAgent("getpkg/1.0");
|
||||
|
||||
std::string hash_path = "/hash/" + toolName + ":" + arch;
|
||||
|
||||
auto req = drogon::HttpRequest::newHttpRequest();
|
||||
req->setMethod(drogon::Get);
|
||||
req->setPath(hash_path);
|
||||
|
||||
client->sendRequest(req, [&](drogon::ReqResult result, const drogon::HttpResponsePtr& response) {
|
||||
std::lock_guard<std::mutex> lock(mtx);
|
||||
if (result == drogon::ReqResult::Ok && response && response->getStatusCode() == drogon::k200OK) {
|
||||
std::string response_body(response->getBody());
|
||||
|
||||
// Try to parse hash from response body
|
||||
try {
|
||||
// Try JSON first
|
||||
auto resp_json = json::parse(response_body);
|
||||
if (resp_json.contains("hash")) {
|
||||
outHash = resp_json["hash"].get<std::string>();
|
||||
success = true;
|
||||
}
|
||||
} catch (...) {
|
||||
// Not JSON, treat as plain text
|
||||
outHash = response_body;
|
||||
// Remove trailing newline if present
|
||||
if (!outHash.empty() && outHash.back() == '\n') {
|
||||
outHash.pop_back();
|
||||
}
|
||||
success = !outHash.empty();
|
||||
}
|
||||
}
|
||||
done = true;
|
||||
cv.notify_one();
|
||||
loop.quit();
|
||||
}, 10.0); // 10 second timeout
|
||||
|
||||
loop.loop();
|
||||
});
|
||||
|
||||
// Wait for completion
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mtx);
|
||||
cv.wait(lock, [&] { return done; });
|
||||
}
|
||||
|
||||
worker.join();
|
||||
return success;
|
||||
// If we get here, no server had the package
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetbinClient::findPackageServer(const std::string& toolName, const std::string& arch,
|
||||
std::string& foundServer) const {
|
||||
// Check each server to see which one has the package
|
||||
for (const auto& server : servers_) {
|
||||
try {
|
||||
std::string url = buildUrl(server, "/hash/" + toolName + ":" + arch);
|
||||
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
cpr::Timeout{10000}, // 10 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
if (response.status_code == 200) {
|
||||
// Package found on this server
|
||||
foundServer = server;
|
||||
return true;
|
||||
}
|
||||
// Continue to next server if 404 or other error
|
||||
} catch (const std::exception& e) {
|
||||
// Continue to next server on exception
|
||||
std::cerr << "[GetbinClient::findPackageServer] Exception with " << server << ": " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
// Package not found on any server
|
||||
return false;
|
||||
}
|
||||
|
||||
bool GetbinClient::deleteObject(const std::string& hash, const std::string& token) {
|
||||
bool success = false;
|
||||
bool done = false;
|
||||
std::mutex mtx;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread worker([&]() {
|
||||
trantor::EventLoop loop;
|
||||
|
||||
auto client = drogon::HttpClient::newHttpClient(
|
||||
"https://" + std::string(SERVER_HOST),
|
||||
&loop,
|
||||
false, // useOldTLS = false (disable TLS 1.0/1.1)
|
||||
true // validateCert = true
|
||||
);
|
||||
|
||||
// Configure SSL certificates
|
||||
std::string ca_path = find_ca_certificates();
|
||||
std::vector<std::pair<std::string, std::string>> sslConfigs;
|
||||
if (!ca_path.empty()) {
|
||||
sslConfigs.push_back({"VerifyCAFile", ca_path});
|
||||
try {
|
||||
// Use first server for backward compatibility
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
// Configure SSL for secure connections
|
||||
client->addSSLConfigs(sslConfigs);
|
||||
std::string url = buildUrl(servers_[0], "/deleteobject?hash=" + hash);
|
||||
|
||||
if (ca_path.empty()) {
|
||||
std::cerr << "[GetbinClient] Warning: No system CA certificates found. SSL verification may fail." << std::endl;
|
||||
}
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{
|
||||
{"User-Agent", getUserAgent()},
|
||||
{"Authorization", "Bearer " + token}
|
||||
},
|
||||
cpr::Timeout{30000}, // 30 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
client->enableCookies();
|
||||
client->setUserAgent("getpkg/1.0");
|
||||
|
||||
std::string delete_path = "/deleteobject?hash=" + hash;
|
||||
|
||||
auto req = drogon::HttpRequest::newHttpRequest();
|
||||
req->setMethod(drogon::Get);
|
||||
req->setPath(delete_path);
|
||||
req->addHeader("Authorization", "Bearer " + token);
|
||||
|
||||
client->sendRequest(req, [&](drogon::ReqResult result, const drogon::HttpResponsePtr& response) {
|
||||
std::lock_guard<std::mutex> lock(mtx);
|
||||
if (result == drogon::ReqResult::Ok && response) {
|
||||
int status_code = static_cast<int>(response->getStatusCode());
|
||||
std::string response_body(response->getBody());
|
||||
|
||||
if (status_code == 200) {
|
||||
// Check if the response indicates success
|
||||
try {
|
||||
auto resp_json = json::parse(response_body);
|
||||
if (resp_json.contains("result") && resp_json["result"] == "success") {
|
||||
success = true;
|
||||
}
|
||||
} catch (...) {
|
||||
// If not JSON, assume success if 200 OK
|
||||
success = true;
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::deleteObject] HTTP error: status code " << status_code << std::endl;
|
||||
std::cerr << "[GetbinClient::deleteObject] Response body: " << response_body << std::endl;
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::deleteObject] HTTP request failed." << std::endl;
|
||||
if (response.status_code == 200) {
|
||||
return true;
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::deleteObject] HTTP " << response.status_code << ": " << response.error.message << std::endl;
|
||||
if (!response.text.empty()) {
|
||||
std::cerr << "[GetbinClient::deleteObject] Response: " << response.text << std::endl;
|
||||
}
|
||||
done = true;
|
||||
cv.notify_one();
|
||||
loop.quit();
|
||||
}, 10.0); // 10 second timeout
|
||||
|
||||
loop.loop();
|
||||
});
|
||||
|
||||
// Wait for completion
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mtx);
|
||||
cv.wait(lock, [&] { return done; });
|
||||
}
|
||||
|
||||
worker.join();
|
||||
return success;
|
||||
}
|
||||
bool GetbinClient::listPackages(std::vector<std::string>& outPackages) {
|
||||
outPackages.clear();
|
||||
|
||||
// Set up SSL configuration
|
||||
std::string ca_path = find_ca_certificates();
|
||||
|
||||
bool success = false;
|
||||
bool done = false;
|
||||
std::mutex mtx;
|
||||
std::condition_variable cv;
|
||||
|
||||
std::thread worker([&]() {
|
||||
trantor::EventLoop loop;
|
||||
|
||||
auto client = drogon::HttpClient::newHttpClient(
|
||||
"https://" + std::string(SERVER_HOST),
|
||||
&loop,
|
||||
false, // useOldTLS = false (disable TLS 1.0/1.1)
|
||||
true // validateCert = true
|
||||
);
|
||||
std::vector<std::pair<std::string, std::string>> sslConfigs;
|
||||
if (!ca_path.empty()) {
|
||||
sslConfigs.push_back({"VerifyCAFile", ca_path});
|
||||
}
|
||||
// Configure SSL for secure connections
|
||||
client->addSSLConfigs(sslConfigs);
|
||||
|
||||
auto req = drogon::HttpRequest::newHttpRequest();
|
||||
req->setMethod(drogon::Get);
|
||||
req->setPath("/dir");
|
||||
return false;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::deleteObject] Exception: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool GetbinClient::listPackages(std::vector<std::string>& outPackages) {
|
||||
try {
|
||||
// Use first server for backward compatibility
|
||||
if (servers_.empty()) {
|
||||
return false;
|
||||
}
|
||||
std::string url = buildUrl(servers_[0], "/dir");
|
||||
|
||||
client->sendRequest(req, [&](drogon::ReqResult result, const drogon::HttpResponsePtr& response) {
|
||||
if (result == drogon::ReqResult::Ok) {
|
||||
int status_code = response->getStatusCode();
|
||||
std::string response_body = std::string(response->getBody());
|
||||
|
||||
if (status_code == 200) {
|
||||
try {
|
||||
json json_response = json::parse(response_body);
|
||||
|
||||
if (json_response.contains("entries") && json_response["entries"].is_array()) {
|
||||
for (const auto& entry : json_response["entries"]) {
|
||||
if (entry.contains("labeltags") && entry["labeltags"].is_array()) {
|
||||
for (const auto& labeltag : entry["labeltags"]) {
|
||||
if (labeltag.is_string()) {
|
||||
std::string name = labeltag.get<std::string>();
|
||||
// Extract tool name (remove architecture suffix if present)
|
||||
size_t colon_pos = name.find(":");
|
||||
if (colon_pos != std::string::npos) {
|
||||
name = name.substr(0, colon_pos);
|
||||
}
|
||||
|
||||
// Skip empty names
|
||||
if (name.empty()) continue;
|
||||
|
||||
// Add to list if not already present
|
||||
if (std::find(outPackages.begin(), outPackages.end(), name) == outPackages.end()) {
|
||||
outPackages.push_back(name);
|
||||
}
|
||||
auto response = cpr::Get(cpr::Url{url},
|
||||
cpr::Header{{"User-Agent", getUserAgent()}},
|
||||
cpr::Timeout{30000}, // 30 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
if (response.status_code == 200) {
|
||||
try {
|
||||
auto resp_json = json::parse(response.text);
|
||||
if (resp_json.contains("entries") && resp_json["entries"].is_array()) {
|
||||
outPackages.clear();
|
||||
std::set<std::string> uniqueTools;
|
||||
|
||||
for (const auto& entry : resp_json["entries"]) {
|
||||
if (entry.contains("labeltags") && entry["labeltags"].is_array()) {
|
||||
for (const auto& labeltag : entry["labeltags"]) {
|
||||
if (labeltag.is_string()) {
|
||||
std::string tag = labeltag.get<std::string>();
|
||||
// Extract tool name from "tool:arch" format
|
||||
size_t colonPos = tag.find(":");
|
||||
if (colonPos != std::string::npos) {
|
||||
std::string toolName = tag.substr(0, colonPos);
|
||||
if (!toolName.empty()) {
|
||||
uniqueTools.insert(toolName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
success = true;
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "[GetbinClient::listPackages] JSON parse error: " << e.what() << std::endl;
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::listPackages] HTTP error: status code " << status_code << std::endl;
|
||||
|
||||
// Convert set to vector
|
||||
for (const auto& tool : uniqueTools) {
|
||||
outPackages.push_back(tool);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
std::cerr << "[GetbinClient::listPackages] HTTP request failed." << std::endl;
|
||||
}
|
||||
done = true;
|
||||
cv.notify_one();
|
||||
loop.quit();
|
||||
}, 10.0);
|
||||
|
||||
loop.loop();
|
||||
});
|
||||
|
||||
// Wait for completion
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(mtx);
|
||||
cv.wait(lock, [&] { return done; });
|
||||
}
|
||||
|
||||
worker.join();
|
||||
|
||||
// Filter out duplicates where we have both toolname and toolname-noarch
|
||||
// Keep the base name and remove the -noarch variant
|
||||
            std::vector<std::string> filteredPackages;
            std::set<std::string> baseNames;

            // First pass: collect all base names (without -noarch)
            for (const auto& pkg : outPackages) {
                const std::string suffix = "-noarch";
                if (pkg.length() < suffix.length() || pkg.substr(pkg.length() - suffix.length()) != suffix) {
                    baseNames.insert(pkg);
                }
            }

            // Second pass: add packages, skipping -noarch variants if base exists
            for (const auto& pkg : outPackages) {
                const std::string suffix = "-noarch";
                if (pkg.length() >= suffix.length() && pkg.substr(pkg.length() - suffix.length()) == suffix) {
                    std::string baseName = pkg.substr(0, pkg.length() - suffix.length());
                    if (baseNames.find(baseName) == baseNames.end()) {
                        filteredPackages.push_back(pkg); // Keep -noarch only if no base version
                    }
                } else {
                    filteredPackages.push_back(pkg); // Always keep base versions
                }
            }

            outPackages = std::move(filteredPackages);

            // Sort the packages for better display
            std::sort(outPackages.begin(), outPackages.end());

            return success;
        } catch (const json::exception&) {
            // Try to parse as newline-separated list
            outPackages.clear();
            std::istringstream stream(response.text);
            std::string line;
            while (std::getline(stream, line)) {
                if (!line.empty()) {
                    outPackages.push_back(line);
                }
            }
            return !outPackages.empty();
        }
    } else {
        std::cerr << "[GetbinClient::listPackages] HTTP " << response.status_code << ": " << response.error.message << std::endl;
    }

    return false;
  } catch (const std::exception& e) {
    std::cerr << "[GetbinClient::listPackages] Exception: " << e.what() << std::endl;
    return false;
  }
}

bool GetbinClient::listAllEntries(std::vector<std::pair<std::string, std::vector<std::string>>>& outEntries) {
  try {
    // Use first server for backward compatibility
    if (servers_.empty()) {
      return false;
    }
    std::string url = buildUrl(servers_[0], "/dir");

    auto response = cpr::Get(cpr::Url{url},
                             cpr::Header{{"User-Agent", getUserAgent()}},
                             cpr::Timeout{30000}, // 30 seconds
                             cpr::VerifySsl{true});

    if (response.status_code == 200) {
      try {
        auto resp_json = json::parse(response.text);
        if (resp_json.contains("entries") && resp_json["entries"].is_array()) {
          outEntries.clear();

          for (const auto& entry : resp_json["entries"]) {
            if (entry.contains("hash") && entry.contains("labeltags") &&
                entry["hash"].is_string() && entry["labeltags"].is_array()) {

              std::string hash = entry["hash"].get<std::string>();
              std::vector<std::string> labeltags;

              for (const auto& tag : entry["labeltags"]) {
                if (tag.is_string()) {
                  labeltags.push_back(tag.get<std::string>());
                }
              }

              outEntries.push_back({hash, labeltags});
            }
          }
          return true;
        }
      } catch (const json::exception& e) {
        std::cerr << "[GetbinClient::listAllEntries] JSON parse error: " << e.what() << std::endl;
      }
    } else {
      std::cerr << "[GetbinClient::listAllEntries] HTTP " << response.status_code << ": " << response.error.message << std::endl;
    }

    return false;
  } catch (const std::exception& e) {
    std::cerr << "[GetbinClient::listAllEntries] Exception: " << e.what() << std::endl;
    return false;
  }
}
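To make the -noarch filtering rule above concrete, here is a small self-contained sketch of the same two-pass logic applied to an invented package list; the package names are placeholders, not taken from the registry.

#include <algorithm>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Same rule as the filtering above: "foo-noarch" is kept only when no plain "foo" exists.
int main() {
    const std::vector<std::string> pkgs = {"bb64", "bb64-noarch", "docsgen-noarch"};
    const std::string suffix = "-noarch";

    auto isNoarch = [&](const std::string& p) {
        return p.size() >= suffix.size() && p.substr(p.size() - suffix.size()) == suffix;
    };

    // First pass: remember every package that is not a -noarch variant.
    std::set<std::string> baseNames;
    for (const auto& p : pkgs)
        if (!isNoarch(p)) baseNames.insert(p);

    // Second pass: keep a -noarch entry only when its base name has no arch-specific build.
    std::vector<std::string> filtered;
    for (const auto& p : pkgs) {
        if (isNoarch(p)) {
            if (!baseNames.count(p.substr(0, p.size() - suffix.size())))
                filtered.push_back(p); // -noarch kept: no arch-specific build exists
        } else {
            filtered.push_back(p);     // arch-specific builds are always kept
        }
    }
    std::sort(filtered.begin(), filtered.end());

    for (const auto& p : filtered)
        std::cout << p << "\n";        // prints: bb64, docsgen-noarch
    return 0;
}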
@@ -1,13 +1,57 @@
 #pragma once
 #include <string>
 #include <vector>
 #include <functional>
 
 class GetbinClient {
 public:
+    // Constructor accepting server list for multi-server support
+    GetbinClient(const std::vector<std::string>& servers);
+
+    // Backward compatibility constructor (uses default server)
     GetbinClient();
-    bool download(const std::string& toolName, const std::string& arch, const std::string& outPath);
-    bool upload(const std::string& archivePath, std::string& outUrl, std::string& outHash, const std::string& token);
+
+    // Progress callback: (downloaded_bytes, total_bytes) -> should_continue
+    using ProgressCallback = std::function<bool(size_t, size_t)>;
+
+    // Multi-server download with fallback logic
+    bool download(const std::string& toolName, const std::string& arch, const std::string& outPath,
+                  ProgressCallback progressCallback = nullptr);
+
+    // Server-specific download
+    bool downloadFromServer(const std::string& serverUrl, const std::string& toolName,
+                            const std::string& arch, const std::string& outPath,
+                            ProgressCallback progressCallback = nullptr);
+
+    // Server-specific upload
+    bool upload(const std::string& serverUrl, const std::string& archivePath,
+                std::string& outUrl, std::string& outHash, const std::string& token,
+                ProgressCallback progressCallback = nullptr);
+
+    // Backward compatibility upload (uses first server)
+    bool upload(const std::string& archivePath, std::string& outUrl, std::string& outHash, const std::string& token,
+                ProgressCallback progressCallback = nullptr);
+
+    // Server-specific hash retrieval
+    bool getHash(const std::string& serverUrl, const std::string& toolName,
+                 const std::string& arch, std::string& outHash);
+
+    // Multi-server hash retrieval with fallback
+    bool getHash(const std::string& toolName, const std::string& arch, std::string& outHash);
+
+    // Find which server has a specific package
+    bool findPackageServer(const std::string& toolName, const std::string& arch,
+                           std::string& foundServer) const;
+
+    // Legacy methods (use first server for backward compatibility)
     bool deleteObject(const std::string& hash, const std::string& token);
     bool listPackages(std::vector<std::string>& outPackages);
-};
+    bool listAllEntries(std::vector<std::pair<std::string, std::vector<std::string>>>& outEntries);
+
+private:
+    static const std::string DEFAULT_SERVER_HOST;
+    std::vector<std::string> servers_;
+
+    std::string getUserAgent() const;
+    std::string buildUrl(const std::string& serverUrl, const std::string& endpoint) const;
+};
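For orientation, a minimal caller-side sketch against the header above. The hostnames, tool name, and output path are placeholders; only the constructor, download, and getHash overloads declared above are used, so this is an illustration of the intended API rather than code from the repository.

#include "GetbinClient.hpp"
#include <iostream>
#include <vector>

int main() {
    // Hypothetical server list: the first entry is tried first, the rest act as fallbacks.
    std::vector<std::string> servers = {"getpkg.xyz", "packages.example.com"};
    GetbinClient client(servers);

    // Multi-server download with fallback and a simple progress callback.
    bool ok = client.download("sometool", "x86_64", "/tmp/sometool.tgz",
        [](size_t done, size_t total) {
            std::cout << "\r" << done << "/" << total << " bytes" << std::flush;
            return true; // returning false would cancel the transfer
        });
    std::cout << (ok ? "\ndownloaded" : "\ndownload failed") << std::endl;

    // Multi-server hash lookup, useful for update checks.
    std::string hash;
    if (client.getHash("sometool", "x86_64", hash)) {
        std::cout << "remote hash: " << hash << std::endl;
    }
    return 0;
}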
575
getpkg/src/MigrationManager.cpp
Normal file
@@ -0,0 +1,575 @@
|
||||
#include "MigrationManager.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
|
||||
MigrationManager::MigrationManager() {
|
||||
const char* home = std::getenv("HOME");
|
||||
if (home) {
|
||||
configDir_ = std::filesystem::path(home) / ".config" / "getpkg";
|
||||
packagesDir_ = configDir_ / PACKAGES_DIRECTORY_NAME;
|
||||
backupDir_ = configDir_ / BACKUP_DIRECTORY_NAME;
|
||||
legacyTokenDir_ = configDir_ / DEFAULT_SERVER_URL;
|
||||
|
||||
packageManager_ = std::make_unique<PackageMetadataManager>(configDir_);
|
||||
serverManager_ = std::make_unique<ServerManager>();
|
||||
}
|
||||
}
|
||||
|
||||
MigrationManager::MigrationManager(const std::filesystem::path& configDir)
|
||||
: configDir_(configDir),
|
||||
packagesDir_(configDir / PACKAGES_DIRECTORY_NAME),
|
||||
backupDir_(configDir / BACKUP_DIRECTORY_NAME),
|
||||
legacyTokenDir_(configDir / DEFAULT_SERVER_URL) {
|
||||
|
||||
packageManager_ = std::make_unique<PackageMetadataManager>(configDir);
|
||||
serverManager_ = std::make_unique<ServerManager>();
|
||||
}
|
||||
|
||||
bool MigrationManager::needsMigration() const {
|
||||
// Check if we have legacy configuration that needs migration
|
||||
bool hasLegacyConfig = hasLegacyServerConfiguration() || hasLegacyPackageFiles();
|
||||
bool hasNewConfig = hasNewFormatConfiguration();
|
||||
bool hasPackagesDir = std::filesystem::exists(packagesDir_);
|
||||
|
||||
// Need migration if:
|
||||
// 1. We have legacy config (token file or package files in root config dir)
|
||||
// 2. We have new config but no packages directory (incomplete migration)
|
||||
return hasLegacyConfig || (hasNewConfig && !hasPackagesDir);
|
||||
}
|
||||
|
||||
bool MigrationManager::performMigration() {
|
||||
lastResult_ = MigrationResult();
|
||||
|
||||
logInfo("Starting migration from single-server to multi-server configuration");
|
||||
|
||||
// Create backup before starting migration
|
||||
if (!createBackup()) {
|
||||
logError("Failed to create backup before migration");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
// Step 1: Create packages directory
|
||||
if (!createPackagesDirectory()) {
|
||||
logError("Failed to create packages directory");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
lastResult_.packageDirectoryCreated = true;
|
||||
|
||||
// Step 2: Migrate server configuration
|
||||
if (!migrateServerConfiguration()) {
|
||||
logError("Failed to migrate server configuration");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
lastResult_.serverConfigMigrated = true;
|
||||
|
||||
// Step 3: Migrate package metadata
|
||||
if (!migratePackageMetadata()) {
|
||||
logError("Failed to migrate package metadata");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step 4: Validate migration
|
||||
if (!validateMigration()) {
|
||||
logError("Migration validation failed");
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Step 5: Clean up legacy files (optional, keep backup)
|
||||
// We don't delete legacy files immediately to allow rollback
|
||||
|
||||
lastResult_.success = true;
|
||||
logInfo("Migration completed successfully");
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Migration failed with exception: " + std::string(e.what()));
|
||||
lastResult_.success = false;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::migrateServerConfiguration() {
|
||||
try {
|
||||
// Load existing server configuration or create default
|
||||
if (!serverManager_->loadConfiguration()) {
|
||||
logWarning("Failed to load existing server configuration, creating default");
|
||||
serverManager_->ensureDefaultConfiguration();
|
||||
}
|
||||
|
||||
// Migrate legacy token file if it exists
|
||||
if (!migrateLegacyTokenFile()) {
|
||||
logWarning("Failed to migrate legacy token file (may not exist)");
|
||||
}
|
||||
|
||||
// Save the configuration to ensure it's in the new format
|
||||
if (!serverManager_->saveConfiguration()) {
|
||||
logError("Failed to save server configuration");
|
||||
return false;
|
||||
}
|
||||
|
||||
logInfo("Server configuration migrated successfully");
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error migrating server configuration: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::migratePackageMetadata() {
|
||||
try {
|
||||
// Find legacy package files in the config directory
|
||||
std::vector<std::filesystem::path> legacyFiles = findFilesWithExtension(configDir_, ".json");
|
||||
|
||||
// Filter out non-package files
|
||||
std::vector<std::filesystem::path> packageFiles;
|
||||
for (const auto& file : legacyFiles) {
|
||||
std::string filename = file.filename().string();
|
||||
// Skip servers.json and any files already in packages directory
|
||||
if (filename != SERVERS_CONFIG_FILENAME && file.parent_path() == configDir_) {
|
||||
packageFiles.push_back(file);
|
||||
}
|
||||
}
|
||||
|
||||
lastResult_.totalPackages = packageFiles.size();
|
||||
|
||||
if (packageFiles.empty()) {
|
||||
logInfo("No legacy package files found to migrate");
|
||||
return true;
|
||||
}
|
||||
|
||||
logInfo("Found " + std::to_string(packageFiles.size()) + " legacy package files to migrate");
|
||||
|
||||
// Migrate each package file
|
||||
for (const auto& packageFile : packageFiles) {
|
||||
if (migrateLegacyPackageFile(packageFile)) {
|
||||
lastResult_.migratedPackages++;
|
||||
logInfo("Migrated package file: " + packageFile.filename().string());
|
||||
} else {
|
||||
logError("Failed to migrate package file: " + packageFile.filename().string());
|
||||
}
|
||||
}
|
||||
|
||||
logInfo("Migrated " + std::to_string(lastResult_.migratedPackages) + " of " +
|
||||
std::to_string(lastResult_.totalPackages) + " package files");
|
||||
|
||||
return lastResult_.migratedPackages == lastResult_.totalPackages;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error migrating package metadata: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::createPackagesDirectory() {
|
||||
return safeDirectoryCreate(packagesDir_);
|
||||
}
|
||||
|
||||
bool MigrationManager::validateMigration() const {
|
||||
try {
|
||||
// Validate server configuration
|
||||
if (!validateServerConfiguration()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate package metadata
|
||||
if (!validatePackageMetadata()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate directory structure
|
||||
if (!validateDirectoryStructure()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error during migration validation: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::canRollback() const {
|
||||
return std::filesystem::exists(backupDir_) && std::filesystem::is_directory(backupDir_);
|
||||
}
|
||||
|
||||
bool MigrationManager::performRollback() {
|
||||
if (!canRollback()) {
|
||||
logError("Cannot rollback: no backup found");
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
logInfo("Starting rollback to previous configuration");
|
||||
|
||||
// Restore from backup
|
||||
if (!restoreFromBackup()) {
|
||||
logError("Failed to restore from backup");
|
||||
return false;
|
||||
}
|
||||
|
||||
logInfo("Rollback completed successfully");
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Rollback failed with exception: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::createBackup() {
|
||||
try {
|
||||
// Create backup directory with timestamp
|
||||
std::string timestamp = generateBackupTimestamp();
|
||||
std::filesystem::path timestampedBackupDir = backupDir_ / timestamp;
|
||||
|
||||
if (!safeDirectoryCreate(timestampedBackupDir)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Backup existing configuration files
|
||||
std::filesystem::path serversConfigPath = configDir_ / SERVERS_CONFIG_FILENAME;
|
||||
if (std::filesystem::exists(serversConfigPath)) {
|
||||
safeFileCopy(serversConfigPath, timestampedBackupDir / SERVERS_CONFIG_FILENAME);
|
||||
}
|
||||
|
||||
// Backup legacy token directory
|
||||
if (std::filesystem::exists(legacyTokenDir_)) {
|
||||
std::filesystem::path backupTokenDir = timestampedBackupDir / DEFAULT_SERVER_URL;
|
||||
safeDirectoryCreate(backupTokenDir);
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(legacyTokenDir_)) {
|
||||
if (entry.is_regular_file()) {
|
||||
safeFileCopy(entry.path(), backupTokenDir / entry.path().filename());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Backup existing package files
|
||||
std::vector<std::filesystem::path> packageFiles = findFilesWithExtension(configDir_, ".json");
|
||||
for (const auto& file : packageFiles) {
|
||||
if (file.parent_path() == configDir_) {
|
||||
safeFileCopy(file, timestampedBackupDir / file.filename());
|
||||
}
|
||||
}
|
||||
|
||||
// Backup packages directory if it exists
|
||||
if (std::filesystem::exists(packagesDir_)) {
|
||||
std::filesystem::path backupPackagesDir = timestampedBackupDir / PACKAGES_DIRECTORY_NAME;
|
||||
safeDirectoryCreate(backupPackagesDir);
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(packagesDir_)) {
|
||||
if (entry.is_regular_file()) {
|
||||
safeFileCopy(entry.path(), backupPackagesDir / entry.path().filename());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logInfo("Backup created at: " + timestampedBackupDir.string());
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to create backup: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::restoreFromBackup() {
|
||||
try {
|
||||
// Find the most recent backup
|
||||
if (!std::filesystem::exists(backupDir_)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::filesystem::path latestBackup;
|
||||
std::filesystem::file_time_type latestTime{};
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(backupDir_)) {
|
||||
if (entry.is_directory()) {
|
||||
auto writeTime = entry.last_write_time();
|
||||
if (writeTime > latestTime) {
|
||||
latestTime = writeTime;
|
||||
latestBackup = entry.path();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (latestBackup.empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Restore files from backup
|
||||
for (const auto& entry : std::filesystem::directory_iterator(latestBackup)) {
|
||||
std::filesystem::path targetPath = configDir_ / entry.path().filename();
|
||||
|
||||
if (entry.is_regular_file()) {
|
||||
safeFileCopy(entry.path(), targetPath);
|
||||
} else if (entry.is_directory()) {
|
||||
// Restore directory recursively
|
||||
std::filesystem::remove_all(targetPath);
|
||||
std::filesystem::copy(entry.path(), targetPath, std::filesystem::copy_options::recursive);
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to restore from backup: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Private helper methods
|
||||
|
||||
bool MigrationManager::hasLegacyServerConfiguration() const {
|
||||
// Check for legacy token file
|
||||
std::filesystem::path legacyTokenPath = legacyTokenDir_ / LEGACY_TOKEN_FILENAME;
|
||||
return std::filesystem::exists(legacyTokenPath);
|
||||
}
|
||||
|
||||
bool MigrationManager::hasLegacyPackageFiles() const {
|
||||
// Check for JSON files directly in config directory (not in packages subdirectory)
|
||||
std::vector<std::filesystem::path> jsonFiles = findFilesWithExtension(configDir_, ".json");
|
||||
|
||||
for (const auto& file : jsonFiles) {
|
||||
std::string filename = file.filename().string();
|
||||
// If it's not servers.json and it's in the config directory (not packages), it's legacy
|
||||
if (filename != SERVERS_CONFIG_FILENAME && file.parent_path() == configDir_) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool MigrationManager::hasNewFormatConfiguration() const {
|
||||
std::filesystem::path serversConfigPath = configDir_ / SERVERS_CONFIG_FILENAME;
|
||||
return std::filesystem::exists(serversConfigPath);
|
||||
}
|
||||
|
||||
bool MigrationManager::migrateLegacyTokenFile() {
|
||||
std::filesystem::path legacyTokenPath = legacyTokenDir_ / LEGACY_TOKEN_FILENAME;
|
||||
|
||||
if (!std::filesystem::exists(legacyTokenPath)) {
|
||||
return true; // Nothing to migrate
|
||||
}
|
||||
|
||||
try {
|
||||
std::ifstream tokenFile(legacyTokenPath);
|
||||
std::string token;
|
||||
std::getline(tokenFile, token);
|
||||
tokenFile.close();
|
||||
|
||||
if (!token.empty()) {
|
||||
// Set the token for the default server
|
||||
if (serverManager_->setWriteToken(DEFAULT_SERVER_URL, token)) {
|
||||
logInfo("Migrated legacy write token for " + std::string(DEFAULT_SERVER_URL));
|
||||
|
||||
// Move the legacy token file to backup (don't delete immediately)
|
||||
std::filesystem::path backupTokenPath = backupDir_ / "legacy_tokens" / DEFAULT_SERVER_URL / LEGACY_TOKEN_FILENAME;
|
||||
safeDirectoryCreate(backupTokenPath.parent_path());
|
||||
safeFileMove(legacyTokenPath, backupTokenPath);
|
||||
|
||||
// Remove the legacy directory if it's empty
|
||||
try {
|
||||
if (std::filesystem::is_empty(legacyTokenDir_)) {
|
||||
std::filesystem::remove(legacyTokenDir_);
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
// Ignore errors when removing empty directory
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to migrate legacy token file: " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::migrateLegacyPackageFile(const std::filesystem::path& legacyPath) {
|
||||
try {
|
||||
if (!std::filesystem::exists(legacyPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Load legacy format
|
||||
std::ifstream file(legacyPath);
|
||||
if (!file.is_open()) {
|
||||
logError("Failed to open legacy file: " + legacyPath.string());
|
||||
return false;
|
||||
}
|
||||
|
||||
nlohmann::json legacyJson;
|
||||
file >> legacyJson;
|
||||
file.close();
|
||||
|
||||
// Convert to new format
|
||||
PackageMetadata metadata = PackageMetadata::fromLegacyJson(legacyJson, DEFAULT_SERVER_URL);
|
||||
|
||||
if (!metadata.isValid()) {
|
||||
logError("Invalid metadata after migration from " + legacyPath.string() + ": " + metadata.getValidationError());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Save in new location
|
||||
if (!packageManager_->savePackageMetadata(metadata)) {
|
||||
logError("Failed to save migrated metadata for " + metadata.name);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Move legacy file to backup (don't delete immediately)
|
||||
std::filesystem::path backupPath = backupDir_ / "legacy_packages" / legacyPath.filename();
|
||||
safeDirectoryCreate(backupPath.parent_path());
|
||||
safeFileMove(legacyPath, backupPath);
|
||||
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error migrating legacy file " + legacyPath.string() + ": " + std::string(e.what()));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::validateServerConfiguration() const {
|
||||
try {
|
||||
// Check if servers.json exists and is valid
|
||||
std::filesystem::path serversConfigPath = configDir_ / SERVERS_CONFIG_FILENAME;
|
||||
if (!std::filesystem::exists(serversConfigPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Try to load the configuration
|
||||
auto tempServerManager = std::make_unique<ServerManager>();
|
||||
if (!tempServerManager->loadConfiguration()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check that we have at least one server
|
||||
std::vector<std::string> servers = tempServerManager->getServers();
|
||||
return !servers.empty();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::validatePackageMetadata() const {
|
||||
try {
|
||||
if (!std::filesystem::exists(packagesDir_)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate all package metadata files
|
||||
return packageManager_->validateAllPackageMetadata();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::validateDirectoryStructure() const {
|
||||
// Check that packages directory exists and is accessible
|
||||
return std::filesystem::exists(packagesDir_) && std::filesystem::is_directory(packagesDir_);
|
||||
}
|
||||
|
||||
void MigrationManager::logError(const std::string& message) const {
|
||||
std::cerr << "[MIGRATION ERROR] " << message << std::endl;
|
||||
lastResult_.errors.push_back(message);
|
||||
}
|
||||
|
||||
void MigrationManager::logWarning(const std::string& message) const {
|
||||
std::cerr << "[MIGRATION WARNING] " << message << std::endl;
|
||||
lastResult_.warnings.push_back(message);
|
||||
}
|
||||
|
||||
void MigrationManager::logInfo(const std::string& message) const {
|
||||
std::cout << "[MIGRATION INFO] " << message << std::endl;
|
||||
}
|
||||
|
||||
bool MigrationManager::safeFileMove(const std::filesystem::path& source, const std::filesystem::path& destination) {
|
||||
try {
|
||||
// Ensure destination directory exists
|
||||
std::filesystem::create_directories(destination.parent_path());
|
||||
|
||||
// Move the file
|
||||
std::filesystem::rename(source, destination);
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to move file from " + source.string() + " to " + destination.string() + ": " + e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::safeFileCopy(const std::filesystem::path& source, const std::filesystem::path& destination) {
|
||||
try {
|
||||
// Ensure destination directory exists
|
||||
std::filesystem::create_directories(destination.parent_path());
|
||||
|
||||
// Copy the file
|
||||
std::filesystem::copy_file(source, destination, std::filesystem::copy_options::overwrite_existing);
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to copy file from " + source.string() + " to " + destination.string() + ": " + e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool MigrationManager::safeDirectoryCreate(const std::filesystem::path& directory) {
|
||||
try {
|
||||
std::filesystem::create_directories(directory);
|
||||
return std::filesystem::exists(directory) && std::filesystem::is_directory(directory);
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
logError("Failed to create directory " + directory.string() + ": " + e.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::filesystem::path> MigrationManager::findFilesWithExtension(const std::filesystem::path& directory, const std::string& extension) const {
|
||||
std::vector<std::filesystem::path> files;
|
||||
|
||||
try {
|
||||
if (!std::filesystem::exists(directory)) {
|
||||
return files;
|
||||
}
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(directory)) {
|
||||
if (entry.is_regular_file() && entry.path().extension() == extension) {
|
||||
files.push_back(entry.path());
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
logError("Error finding files with extension " + extension + " in " + directory.string() + ": " + e.what());
|
||||
}
|
||||
|
||||
return files;
|
||||
}
|
||||
|
||||
std::string MigrationManager::generateBackupTimestamp() const {
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto time_t = std::chrono::system_clock::to_time_t(now);
|
||||
|
||||
std::stringstream ss;
|
||||
ss << std::put_time(std::gmtime(&time_t), "%Y%m%d_%H%M%S");
|
||||
return ss.str();
|
||||
}
|
100
getpkg/src/MigrationManager.hpp
Normal file
@@ -0,0 +1,100 @@
#pragma once

#include <string>
#include <vector>
#include <filesystem>
#include <memory>
#include "PackageMetadata.hpp"
#include "ServerManager.hpp"

/**
 * Migration manager for handling the transition from single-server to multi-server configuration
 * Handles migration of server configuration, package metadata, and directory structure
 */
class MigrationManager {
public:
    MigrationManager();
    explicit MigrationManager(const std::filesystem::path& configDir);

    // Main migration interface
    bool needsMigration() const;
    bool performMigration();

    // Migration status and reporting
    struct MigrationResult {
        bool success = false;
        int migratedPackages = 0;
        int totalPackages = 0;
        bool serverConfigMigrated = false;
        bool packageDirectoryCreated = false;
        std::vector<std::string> errors;
        std::vector<std::string> warnings;
    };

    MigrationResult getLastMigrationResult() const { return lastResult_; }

    // Individual migration components (for testing and granular control)
    bool migrateServerConfiguration();
    bool migratePackageMetadata();
    bool createPackagesDirectory();
    bool validateMigration() const;

    // Rollback capabilities
    bool canRollback() const;
    bool performRollback();

    // Backup and restore
    bool createBackup();
    bool restoreFromBackup();

private:
    std::filesystem::path configDir_;
    std::filesystem::path packagesDir_;
    std::filesystem::path backupDir_;
    std::filesystem::path legacyTokenDir_;

    std::unique_ptr<PackageMetadataManager> packageManager_;
    std::unique_ptr<ServerManager> serverManager_;

    mutable MigrationResult lastResult_;

    // Migration detection helpers
    bool hasLegacyServerConfiguration() const;
    bool hasLegacyPackageFiles() const;
    bool hasNewFormatConfiguration() const;

    // Migration implementation helpers
    bool migrateLegacyTokenFile();
    bool migrateLegacyPackageFile(const std::filesystem::path& legacyPath);
    bool movePackageFilesToSubdirectory();
    bool updatePackageMetadataFormat();
    bool cleanupLegacyFiles();

    // Backup and rollback helpers
    bool backupLegacyConfiguration();
    bool backupExistingConfiguration();
    std::string generateBackupTimestamp() const;

    // Validation helpers
    bool validateServerConfiguration() const;
    bool validatePackageMetadata() const;
    bool validateDirectoryStructure() const;

    // Error handling and logging
    void logError(const std::string& message) const;
    void logWarning(const std::string& message) const;
    void logInfo(const std::string& message) const;

    // File system utilities
    bool safeFileMove(const std::filesystem::path& source, const std::filesystem::path& destination);
    bool safeFileCopy(const std::filesystem::path& source, const std::filesystem::path& destination);
    bool safeDirectoryCreate(const std::filesystem::path& directory);
    std::vector<std::filesystem::path> findFilesWithExtension(const std::filesystem::path& directory, const std::string& extension) const;

    // Constants
    static constexpr const char* LEGACY_TOKEN_FILENAME = "write_token.txt";
    static constexpr const char* SERVERS_CONFIG_FILENAME = "servers.json";
    static constexpr const char* PACKAGES_DIRECTORY_NAME = "packages";
    static constexpr const char* BACKUP_DIRECTORY_NAME = "migration_backup";
    static constexpr const char* DEFAULT_SERVER_URL = "getpkg.xyz";
};
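A sketch of how a caller could drive the migration interface declared above. The control flow is an assumption based on this header and the MigrationManager.cpp shown earlier (backup before migration, rollback on failure), not a transcript of getpkg's actual startup code.

#include "MigrationManager.hpp"
#include <iostream>

// Run the legacy-to-multi-server migration once at startup if it is needed.
void migrateIfNeeded() {
    MigrationManager migration; // defaults to ~/.config/getpkg
    if (!migration.needsMigration()) {
        return;
    }

    if (migration.performMigration()) {
        auto result = migration.getLastMigrationResult();
        std::cout << "Migrated " << result.migratedPackages << " of "
                  << result.totalPackages << " package files" << std::endl;
    } else {
        // performMigration() creates a backup first, so a rollback is available.
        if (migration.canRollback()) {
            migration.performRollback();
        }
        for (const auto& err : migration.getLastMigrationResult().errors) {
            std::cerr << err << std::endl;
        }
    }
}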
463
getpkg/src/PackageMetadata.cpp
Normal file
@@ -0,0 +1,463 @@
|
||||
#include "PackageMetadata.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <regex>
|
||||
#include <cstdlib>
|
||||
|
||||
// PackageMetadata implementation
|
||||
|
||||
PackageMetadata::PackageMetadata(const std::string& name, const std::string& version,
|
||||
const std::string& hash, const std::string& arch,
|
||||
const std::string& sourceServer, const std::string& installDate)
|
||||
: name(name), version(version), hash(hash), arch(arch), sourceServer(sourceServer) {
|
||||
|
||||
if (installDate.empty()) {
|
||||
this->installDate = getCurrentTimestamp();
|
||||
} else {
|
||||
this->installDate = installDate;
|
||||
}
|
||||
}
|
||||
|
||||
json PackageMetadata::toJson() const {
|
||||
json j;
|
||||
j["name"] = name;
|
||||
j["version"] = version;
|
||||
j["hash"] = hash;
|
||||
j["arch"] = arch;
|
||||
j["sourceServer"] = sourceServer;
|
||||
j["installDate"] = installDate;
|
||||
j["lastUpdated"] = getCurrentTimestamp();
|
||||
return j;
|
||||
}
|
||||
|
||||
PackageMetadata PackageMetadata::fromJson(const json& j) {
|
||||
PackageMetadata metadata;
|
||||
|
||||
// Required fields
|
||||
if (j.contains("name") && j["name"].is_string()) {
|
||||
metadata.name = j["name"].get<std::string>();
|
||||
}
|
||||
if (j.contains("version") && j["version"].is_string()) {
|
||||
metadata.version = j["version"].get<std::string>();
|
||||
}
|
||||
if (j.contains("hash") && j["hash"].is_string()) {
|
||||
metadata.hash = j["hash"].get<std::string>();
|
||||
}
|
||||
if (j.contains("arch") && j["arch"].is_string()) {
|
||||
metadata.arch = j["arch"].get<std::string>();
|
||||
}
|
||||
|
||||
// New fields with defaults
|
||||
if (j.contains("sourceServer") && j["sourceServer"].is_string()) {
|
||||
metadata.sourceServer = j["sourceServer"].get<std::string>();
|
||||
} else {
|
||||
metadata.sourceServer = "getpkg.xyz"; // Default fallback
|
||||
}
|
||||
|
||||
if (j.contains("installDate") && j["installDate"].is_string()) {
|
||||
metadata.installDate = j["installDate"].get<std::string>();
|
||||
} else {
|
||||
metadata.installDate = metadata.getCurrentTimestamp();
|
||||
}
|
||||
|
||||
return metadata;
|
||||
}
|
||||
|
||||
PackageMetadata PackageMetadata::fromLegacyJson(const json& j, const std::string& defaultServer) {
|
||||
PackageMetadata metadata;
|
||||
|
||||
// Legacy format only has: name, version, hash, arch
|
||||
if (j.contains("name") && j["name"].is_string()) {
|
||||
metadata.name = j["name"].get<std::string>();
|
||||
}
|
||||
if (j.contains("version") && j["version"].is_string()) {
|
||||
metadata.version = j["version"].get<std::string>();
|
||||
}
|
||||
if (j.contains("hash") && j["hash"].is_string()) {
|
||||
metadata.hash = j["hash"].get<std::string>();
|
||||
}
|
||||
if (j.contains("arch") && j["arch"].is_string()) {
|
||||
metadata.arch = j["arch"].get<std::string>();
|
||||
}
|
||||
|
||||
// Set defaults for new fields
|
||||
metadata.sourceServer = defaultServer;
|
||||
metadata.installDate = metadata.getCurrentTimestamp();
|
||||
|
||||
return metadata;
|
||||
}
|
||||
|
||||
bool PackageMetadata::isValid() const {
|
||||
return isValidName() && isValidVersion() && isValidHash() &&
|
||||
isValidArch() && isValidServerUrl() && isValidTimestamp();
|
||||
}
|
||||
|
||||
std::string PackageMetadata::getValidationError() const {
|
||||
if (!isValidName()) {
|
||||
return "Invalid package name: must be non-empty and contain only alphanumeric characters, hyphens, and underscores";
|
||||
}
|
||||
if (!isValidVersion()) {
|
||||
return "Invalid version: must be non-empty";
|
||||
}
|
||||
if (!isValidHash()) {
|
||||
return "Invalid hash: must be non-empty and contain only hexadecimal characters";
|
||||
}
|
||||
if (!isValidArch()) {
|
||||
return "Invalid architecture: must be non-empty";
|
||||
}
|
||||
if (!isValidServerUrl()) {
|
||||
return "Invalid source server: must be non-empty and contain valid characters";
|
||||
}
|
||||
if (!isValidTimestamp()) {
|
||||
return "Invalid install date: must be non-empty";
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
bool PackageMetadata::saveToFile(const std::filesystem::path& filePath) const {
|
||||
if (!isValid()) {
|
||||
std::cerr << "Cannot save invalid package metadata: " << getValidationError() << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
// Ensure parent directory exists
|
||||
std::filesystem::create_directories(filePath.parent_path());
|
||||
|
||||
std::ofstream file(filePath);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open file for writing: " << filePath << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
file << toJson().dump(2);
|
||||
file.close();
|
||||
return true;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error saving package metadata to " << filePath << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
PackageMetadata PackageMetadata::loadFromFile(const std::filesystem::path& filePath) {
|
||||
PackageMetadata metadata;
|
||||
|
||||
try {
|
||||
if (!std::filesystem::exists(filePath)) {
|
||||
std::cerr << "Package metadata file does not exist: " << filePath << std::endl;
|
||||
return metadata;
|
||||
}
|
||||
|
||||
std::ifstream file(filePath);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open file for reading: " << filePath << std::endl;
|
||||
return metadata;
|
||||
}
|
||||
|
||||
json j;
|
||||
file >> j;
|
||||
file.close();
|
||||
|
||||
metadata = fromJson(j);
|
||||
|
||||
if (!metadata.isValid()) {
|
||||
std::cerr << "Loaded package metadata is invalid: " << metadata.getValidationError() << std::endl;
|
||||
}
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error loading package metadata from " << filePath << ": " << e.what() << std::endl;
|
||||
}
|
||||
|
||||
return metadata;
|
||||
}
|
||||
|
||||
std::string PackageMetadata::getCurrentTimestamp() const {
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto time_t = std::chrono::system_clock::to_time_t(now);
|
||||
|
||||
std::stringstream ss;
|
||||
ss << std::put_time(std::gmtime(&time_t), "%Y-%m-%dT%H:%M:%SZ");
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
bool PackageMetadata::needsUpdate(const std::string& remoteHash) const {
|
||||
return hash != remoteHash;
|
||||
}
|
||||
|
||||
// Private validation methods
|
||||
bool PackageMetadata::isValidName() const {
|
||||
if (name.empty()) return false;
|
||||
|
||||
// Package name should contain only alphanumeric characters, hyphens, and underscores
|
||||
std::regex namePattern("^[a-zA-Z0-9_-]+$");
|
||||
return std::regex_match(name, namePattern);
|
||||
}
|
||||
|
||||
bool PackageMetadata::isValidVersion() const {
|
||||
return !version.empty();
|
||||
}
|
||||
|
||||
bool PackageMetadata::isValidHash() const {
|
||||
if (hash.empty()) return false;
|
||||
|
||||
// Hash should contain only hexadecimal characters
|
||||
std::regex hashPattern("^[a-fA-F0-9]+$");
|
||||
return std::regex_match(hash, hashPattern);
|
||||
}
|
||||
|
||||
bool PackageMetadata::isValidArch() const {
|
||||
return !arch.empty();
|
||||
}
|
||||
|
||||
bool PackageMetadata::isValidServerUrl() const {
|
||||
if (sourceServer.empty()) return false;
|
||||
|
||||
// Basic server URL validation - should not contain invalid characters
|
||||
std::regex serverPattern("^[a-zA-Z0-9._-]+$");
|
||||
return std::regex_match(sourceServer, serverPattern);
|
||||
}
|
||||
|
||||
bool PackageMetadata::isValidTimestamp() const {
|
||||
return !installDate.empty();
|
||||
}
|
||||
|
||||
// PackageMetadataManager implementation
|
||||
|
||||
PackageMetadataManager::PackageMetadataManager() {
|
||||
const char* home = std::getenv("HOME");
|
||||
if (home) {
|
||||
configDir_ = std::filesystem::path(home) / ".config" / "getpkg";
|
||||
packagesDir_ = configDir_ / "packages";
|
||||
}
|
||||
}
|
||||
|
||||
PackageMetadataManager::PackageMetadataManager(const std::filesystem::path& configDir)
|
||||
: configDir_(configDir), packagesDir_(configDir / "packages") {
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::ensurePackagesDirectory() {
|
||||
try {
|
||||
if (!std::filesystem::exists(packagesDir_)) {
|
||||
std::filesystem::create_directories(packagesDir_);
|
||||
}
|
||||
return std::filesystem::is_directory(packagesDir_);
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error creating packages directory: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::filesystem::path PackageMetadataManager::getPackagesDirectory() const {
|
||||
return packagesDir_;
|
||||
}
|
||||
|
||||
std::filesystem::path PackageMetadataManager::getPackageFilePath(const std::string& toolName) const {
|
||||
return packagesDir_ / (toolName + ".json");
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::savePackageMetadata(const PackageMetadata& metadata) {
|
||||
if (!ensurePackagesDirectory()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::filesystem::path filePath = getPackageFilePath(metadata.name);
|
||||
return metadata.saveToFile(filePath);
|
||||
}
|
||||
|
||||
PackageMetadata PackageMetadataManager::loadPackageMetadata(const std::string& toolName) {
|
||||
std::filesystem::path filePath = getPackageFilePath(toolName);
|
||||
return PackageMetadata::loadFromFile(filePath);
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::packageExists(const std::string& toolName) const {
|
||||
std::filesystem::path filePath = getPackageFilePath(toolName);
|
||||
return std::filesystem::exists(filePath);
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::removePackageMetadata(const std::string& toolName) {
|
||||
try {
|
||||
std::filesystem::path filePath = getPackageFilePath(toolName);
|
||||
if (std::filesystem::exists(filePath)) {
|
||||
return std::filesystem::remove(filePath);
|
||||
}
|
||||
return true; // Already doesn't exist
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error removing package metadata for " << toolName << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::migrateFromLegacyFormat() {
|
||||
try {
|
||||
std::vector<std::string> legacyFiles = findLegacyPackageFiles();
|
||||
|
||||
if (legacyFiles.empty()) {
|
||||
return true; // Nothing to migrate
|
||||
}
|
||||
|
||||
if (!ensurePackagesDirectory()) {
|
||||
std::cerr << "Failed to create packages directory for migration" << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
int successCount = 0;
|
||||
for (const std::string& fileName : legacyFiles) {
|
||||
std::filesystem::path legacyPath = configDir_ / fileName;
|
||||
if (migrateLegacyPackageFile(legacyPath)) {
|
||||
successCount++;
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << "Migrated " << successCount << " of " << legacyFiles.size() << " legacy package files" << std::endl;
|
||||
return successCount == legacyFiles.size();
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error during migration: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> PackageMetadataManager::findLegacyPackageFiles() const {
|
||||
std::vector<std::string> legacyFiles;
|
||||
|
||||
try {
|
||||
if (!std::filesystem::exists(configDir_)) {
|
||||
return legacyFiles;
|
||||
}
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(configDir_)) {
|
||||
if (entry.is_regular_file() && entry.path().extension() == ".json") {
|
||||
std::string fileName = entry.path().filename().string();
|
||||
|
||||
// Skip if it's already in the packages directory or is servers.json
|
||||
if (fileName != "servers.json") {
|
||||
legacyFiles.push_back(fileName);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error finding legacy package files: " << e.what() << std::endl;
|
||||
}
|
||||
|
||||
return legacyFiles;
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::migrateLegacyPackageFile(const std::filesystem::path& legacyPath, const std::string& defaultServer) {
|
||||
try {
|
||||
if (!std::filesystem::exists(legacyPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Load legacy format
|
||||
std::ifstream file(legacyPath);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open legacy file: " << legacyPath << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
json legacyJson;
|
||||
file >> legacyJson;
|
||||
file.close();
|
||||
|
||||
// Convert to new format
|
||||
PackageMetadata metadata = PackageMetadata::fromLegacyJson(legacyJson, defaultServer);
|
||||
|
||||
if (!metadata.isValid()) {
|
||||
std::cerr << "Invalid metadata after migration from " << legacyPath << ": " << metadata.getValidationError() << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Save in new location
|
||||
if (!savePackageMetadata(metadata)) {
|
||||
std::cerr << "Failed to save migrated metadata for " << metadata.name << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Remove legacy file
|
||||
std::filesystem::remove(legacyPath);
|
||||
|
||||
std::cout << "Migrated package metadata: " << metadata.name << " from " << defaultServer << std::endl;
|
||||
return true;
|
||||
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error migrating legacy file " << legacyPath << ": " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> PackageMetadataManager::listInstalledPackages() const {
|
||||
std::vector<std::string> packages;
|
||||
|
||||
try {
|
||||
if (!std::filesystem::exists(packagesDir_)) {
|
||||
return packages;
|
||||
}
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(packagesDir_)) {
|
||||
if (entry.is_regular_file() && entry.path().extension() == ".json") {
|
||||
std::string toolName = entry.path().stem().string();
|
||||
packages.push_back(toolName);
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error listing installed packages: " << e.what() << std::endl;
|
||||
}
|
||||
|
||||
return packages;
|
||||
}
|
||||
|
||||
std::vector<PackageMetadata> PackageMetadataManager::getAllPackageMetadata() const {
|
||||
std::vector<PackageMetadata> allMetadata;
|
||||
|
||||
std::vector<std::string> packages = listInstalledPackages();
|
||||
for (const std::string& packageName : packages) {
|
||||
PackageMetadata metadata = const_cast<PackageMetadataManager*>(this)->loadPackageMetadata(packageName);
|
||||
if (metadata.isValid()) {
|
||||
allMetadata.push_back(metadata);
|
||||
}
|
||||
}
|
||||
|
||||
return allMetadata;
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::validateAllPackageMetadata() const {
|
||||
std::vector<std::string> packages = listInstalledPackages();
|
||||
|
||||
for (const std::string& packageName : packages) {
|
||||
PackageMetadata metadata = const_cast<PackageMetadataManager*>(this)->loadPackageMetadata(packageName);
|
||||
if (!metadata.isValid()) {
|
||||
std::cerr << "Invalid metadata for package " << packageName << ": " << metadata.getValidationError() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int PackageMetadataManager::cleanupInvalidMetadata() {
|
||||
int removedCount = 0;
|
||||
std::vector<std::string> packages = listInstalledPackages();
|
||||
|
||||
for (const std::string& packageName : packages) {
|
||||
PackageMetadata metadata = loadPackageMetadata(packageName);
|
||||
if (!metadata.isValid()) {
|
||||
std::cerr << "Removing invalid metadata for package " << packageName << ": " << metadata.getValidationError() << std::endl;
|
||||
if (removePackageMetadata(packageName)) {
|
||||
removedCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return removedCount;
|
||||
}
|
||||
|
||||
bool PackageMetadataManager::isValidPackageFile(const std::filesystem::path& filePath) const {
|
||||
return filePath.extension() == ".json" && std::filesystem::is_regular_file(filePath);
|
||||
}
|
||||
|
||||
std::string PackageMetadataManager::extractToolNameFromPath(const std::filesystem::path& filePath) const {
|
||||
return filePath.stem().string();
|
||||
}
|
97
getpkg/src/PackageMetadata.hpp
Normal file
@@ -0,0 +1,97 @@
#pragma once

#include <string>
#include <filesystem>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

/**
 * Enhanced package metadata structure with server source tracking
 * Supports both new multi-server format and legacy single-server migration
 */
struct PackageMetadata {
    std::string name;
    std::string version;
    std::string hash;
    std::string arch;
    std::string sourceServer;  // New field for server tracking
    std::string installDate;   // New field for installation tracking

    // Default constructor
    PackageMetadata() = default;

    // Constructor with all fields
    PackageMetadata(const std::string& name, const std::string& version,
                    const std::string& hash, const std::string& arch,
                    const std::string& sourceServer, const std::string& installDate = "");

    // Serialization methods
    json toJson() const;
    static PackageMetadata fromJson(const json& j);

    // Migration support - convert from legacy format
    static PackageMetadata fromLegacyJson(const json& j, const std::string& defaultServer = "getpkg.xyz");

    // Validation
    bool isValid() const;
    std::string getValidationError() const;

    // File operations
    bool saveToFile(const std::filesystem::path& filePath) const;
    static PackageMetadata loadFromFile(const std::filesystem::path& filePath);

    // Utility methods
    std::string getCurrentTimestamp() const;
    bool needsUpdate(const std::string& remoteHash) const;

private:
    // Internal validation helpers
    bool isValidName() const;
    bool isValidVersion() const;
    bool isValidHash() const;
    bool isValidArch() const;
    bool isValidServerUrl() const;
    bool isValidTimestamp() const;
};

/**
 * Package metadata manager for handling the packages directory structure
 */
class PackageMetadataManager {
public:
    PackageMetadataManager();
    explicit PackageMetadataManager(const std::filesystem::path& configDir);

    // Directory management
    bool ensurePackagesDirectory();
    std::filesystem::path getPackagesDirectory() const;
    std::filesystem::path getPackageFilePath(const std::string& toolName) const;

    // Package operations
    bool savePackageMetadata(const PackageMetadata& metadata);
    PackageMetadata loadPackageMetadata(const std::string& toolName);
    bool packageExists(const std::string& toolName) const;
    bool removePackageMetadata(const std::string& toolName);

    // Migration support
    bool migrateFromLegacyFormat();
    std::vector<std::string> findLegacyPackageFiles() const;
    bool migrateLegacyPackageFile(const std::filesystem::path& legacyPath, const std::string& defaultServer = "getpkg.xyz");

    // Listing and enumeration
    std::vector<std::string> listInstalledPackages() const;
    std::vector<PackageMetadata> getAllPackageMetadata() const;

    // Validation and cleanup
    bool validateAllPackageMetadata() const;
    int cleanupInvalidMetadata();

private:
    std::filesystem::path configDir_;
    std::filesystem::path packagesDir_;

    // Helper methods
    bool isValidPackageFile(const std::filesystem::path& filePath) const;
    std::string extractToolNameFromPath(const std::filesystem::path& filePath) const;
};
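A minimal sketch of the metadata round-trip using the classes declared above; every concrete value (tool name, version, hashes) is an invented placeholder, and the flow is only an illustration of the declared API.

#include "PackageMetadata.hpp"
#include <iostream>

int main() {
    PackageMetadataManager manager; // defaults to ~/.config/getpkg/packages

    // Record an install; all field values here are illustrative placeholders.
    PackageMetadata meta("sometool", "2025.0720", "a1b2c3d4e5f6", "x86_64", "getpkg.xyz");
    if (!meta.isValid()) {
        std::cerr << meta.getValidationError() << std::endl;
        return 1;
    }
    manager.savePackageMetadata(meta); // writes packages/sometool.json

    // Later: reload and decide whether an update is needed.
    PackageMetadata loaded = manager.loadPackageMetadata("sometool");
    std::string remoteHash = "0011aabbccdd"; // would normally come from a hash lookup
    if (loaded.needsUpdate(remoteHash)) {
        std::cout << loaded.name << " is out of date" << std::endl;
    }
    return 0;
}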
353
getpkg/src/ServerManager.cpp
Normal file
@@ -0,0 +1,353 @@
|
||||
#include "ServerManager.hpp"
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <sstream>
|
||||
#include <regex>
|
||||
#include <cpr/cpr.h>
|
||||
|
||||
using json = nlohmann::json;
|
||||
|
||||
// ServerConfig implementation
|
||||
json ServerConfig::toJson() const {
|
||||
return json{
|
||||
{"url", url},
|
||||
{"name", name},
|
||||
{"default", isDefault},
|
||||
{"writeToken", writeToken},
|
||||
{"added", addedDate}
|
||||
};
|
||||
}
|
||||
|
||||
ServerConfig ServerConfig::fromJson(const json& j) {
|
||||
ServerConfig config;
|
||||
config.url = j.value("url", "");
|
||||
config.name = j.value("name", "");
|
||||
config.isDefault = j.value("default", false);
|
||||
config.writeToken = j.value("writeToken", "");
|
||||
config.addedDate = j.value("added", "");
|
||||
return config;
|
||||
}
|
||||
|
||||
// ServerManager implementation
|
||||
ServerManager::ServerManager() {
|
||||
const char* home = getenv("HOME");
|
||||
if (home) {
|
||||
configPath_ = std::filesystem::path(home) / ".config" / "getpkg" / "servers.json";
|
||||
}
|
||||
}
|
||||
|
||||
bool ServerManager::addServer(const std::string& serverUrl, const std::string& writeToken) {
|
||||
if (!validateServerUrl(serverUrl)) {
|
||||
std::cerr << "Invalid server URL: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if server already exists
|
||||
if (findServer(serverUrl) != nullptr) {
|
||||
std::cerr << "Server already exists: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if server is reachable
|
||||
if (!isServerReachable(serverUrl)) {
|
||||
std::cerr << "Warning: Server may not be reachable: " << serverUrl << std::endl;
|
||||
// Continue anyway - server might be temporarily down
|
||||
}
|
||||
|
||||
ServerConfig config;
|
||||
config.url = serverUrl;
|
||||
config.name = serverUrl; // Use URL as default name
|
||||
config.isDefault = servers_.empty(); // First server becomes default
|
||||
config.writeToken = writeToken;
|
||||
config.addedDate = getCurrentTimestamp();
|
||||
|
||||
servers_.push_back(config);
|
||||
|
||||
return saveConfiguration();
|
||||
}
|
||||
|
||||
bool ServerManager::removeServer(const std::string& serverUrl) {
|
||||
auto it = std::find_if(servers_.begin(), servers_.end(),
|
||||
[&serverUrl](const ServerConfig& config) {
|
||||
return config.url == serverUrl;
|
||||
});
|
||||
|
||||
if (it == servers_.end()) {
|
||||
std::cerr << "Server not found: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't allow removing the last server
|
||||
if (servers_.size() == 1) {
|
||||
std::cerr << "Cannot remove the last server. Add another server first." << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool wasDefault = it->isDefault;
|
||||
servers_.erase(it);
|
||||
|
||||
// If we removed the default server, make the first remaining server default
|
||||
if (wasDefault && !servers_.empty()) {
|
||||
servers_[0].isDefault = true;
|
||||
}
|
||||
|
||||
return saveConfiguration();
|
||||
}
|
||||
|
||||
std::vector<std::string> ServerManager::getServers() const {
|
||||
std::vector<std::string> urls;
|
||||
for (const auto& server : servers_) {
|
||||
urls.push_back(server.url);
|
||||
}
|
||||
return urls;
|
||||
}
|
||||
|
||||
std::string ServerManager::getDefaultServer() const {
|
||||
for (const auto& server : servers_) {
|
||||
if (server.isDefault) {
|
||||
return server.url;
|
||||
}
|
||||
}
|
||||
|
||||
// If no default is set, return the first server
|
||||
if (!servers_.empty()) {
|
||||
return servers_[0].url;
|
||||
}
|
||||
|
||||
return "getpkg.xyz"; // Fallback to original default
|
||||
}
|
||||
|
||||
std::string ServerManager::getDefaultPublishServer() const {
|
||||
// Return first server with a write token
|
||||
for (const auto& server : servers_) {
|
||||
if (!server.writeToken.empty()) {
|
||||
return server.url;
|
||||
}
|
||||
}
|
||||
|
||||
// If no server has a token, return the default server
|
||||
return getDefaultServer();
|
||||
}
|
||||
|
||||
bool ServerManager::setWriteToken(const std::string& serverUrl, const std::string& token) {
|
||||
ServerConfig* server = findServer(serverUrl);
|
||||
if (server == nullptr) {
|
||||
std::cerr << "Server not found: " << serverUrl << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
server->writeToken = token;
|
||||
return saveConfiguration();
|
||||
}
|
||||
|
||||
std::string ServerManager::getWriteToken(const std::string& serverUrl) const {
|
||||
const ServerConfig* server = findServer(serverUrl);
|
||||
if (server != nullptr) {
|
||||
return server->writeToken;
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
bool ServerManager::hasWriteToken(const std::string& serverUrl) const {
|
||||
const ServerConfig* server = findServer(serverUrl);
|
||||
return server != nullptr && !server->writeToken.empty();
|
||||
}
|
||||
|
||||
std::vector<std::string> ServerManager::getServersWithTokens() const {
|
||||
std::vector<std::string> serversWithTokens;
|
||||
for (const auto& server : servers_) {
|
||||
if (!server.writeToken.empty()) {
|
||||
serversWithTokens.push_back(server.url);
|
||||
}
|
||||
}
|
||||
return serversWithTokens;
|
||||
}
|
||||
|
||||
bool ServerManager::loadConfiguration() {
|
||||
if (!std::filesystem::exists(configPath_)) {
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
|
||||
try {
|
||||
std::ifstream file(configPath_);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open server configuration file: " << configPath_ << std::endl;
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
|
||||
json config;
|
||||
file >> config;
|
||||
|
||||
if (!config.contains("servers") || !config["servers"].is_array()) {
|
||||
std::cerr << "Invalid server configuration format" << std::endl;
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
|
||||
servers_.clear();
|
||||
for (const auto& serverJson : config["servers"]) {
|
||||
try {
|
||||
servers_.push_back(ServerConfig::fromJson(serverJson));
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Warning: Skipping invalid server config: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure we have at least one server
|
||||
if (servers_.empty()) {
|
||||
ensureDefaultConfiguration();
|
||||
}
|
||||
|
||||
return true;
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error loading server configuration: " << e.what() << std::endl;
|
||||
ensureDefaultConfiguration();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
bool ServerManager::saveConfiguration() {
|
||||
try {
|
||||
// Ensure directory exists
|
||||
std::filesystem::create_directories(configPath_.parent_path());
|
||||
|
||||
json config;
|
||||
config["version"] = "1.0";
|
||||
config["lastUpdated"] = getCurrentTimestamp();
|
||||
|
||||
json serversArray = json::array();
|
||||
for (const auto& server : servers_) {
|
||||
serversArray.push_back(server.toJson());
|
||||
}
|
||||
config["servers"] = serversArray;
|
||||
|
||||
std::ofstream file(configPath_);
|
||||
if (!file.is_open()) {
|
||||
std::cerr << "Failed to open server configuration file for writing: " << configPath_ << std::endl;
|
||||
return false;
|
||||
}
|
||||
|
||||
file << config.dump(2);
|
||||
return file.good();
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Error saving server configuration: " << e.what() << std::endl;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
void ServerManager::ensureDefaultConfiguration() {
|
||||
servers_.clear();
|
||||
|
||||
ServerConfig defaultServer;
|
||||
defaultServer.url = "getpkg.xyz";
|
||||
defaultServer.name = "Official getpkg Registry";
|
||||
defaultServer.isDefault = true;
|
||||
defaultServer.writeToken = "";
|
||||
defaultServer.addedDate = getCurrentTimestamp();
|
||||
|
||||
servers_.push_back(defaultServer);
|
||||
|
||||
saveConfiguration();
|
||||
}
|
||||
|
||||
bool ServerManager::migrateFromLegacy() {
|
||||
const char* home = getenv("HOME");
|
||||
if (!home) {
|
||||
return false;
|
||||
}
|
||||
|
||||
std::filesystem::path legacyTokenPath = std::filesystem::path(home) / ".config" / "getpkg.xyz" / "write_token.txt";
|
||||
|
||||
if (std::filesystem::exists(legacyTokenPath)) {
|
||||
try {
|
||||
std::ifstream tokenFile(legacyTokenPath);
|
||||
std::string token;
|
||||
std::getline(tokenFile, token);
|
||||
|
||||
if (!token.empty()) {
|
||||
// Set the token for getpkg.xyz server
|
||||
setWriteToken("getpkg.xyz", token);
|
||||
|
||||
// Optionally remove the legacy token file
|
||||
// std::filesystem::remove(legacyTokenPath);
|
||||
|
||||
std::cout << "Migrated legacy write token for getpkg.xyz" << std::endl;
|
||||
return true;
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
std::cerr << "Warning: Failed to migrate legacy token: " << e.what() << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ServerManager::validateServerUrl(const std::string& url) const {
|
||||
if (url.empty() || url.length() > 253) { // DNS name length limit
|
||||
return false;
|
||||
}
|
||||
|
||||
// Basic URL validation - should be a valid hostname or IP
|
||||
// Allow formats like: example.com, sub.example.com, 192.168.1.1, localhost
|
||||
std::regex urlPattern(R"(^[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$)");
|
||||
|
||||
if (!std::regex_match(url, urlPattern)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Additional checks
|
||||
if (url.find("..") != std::string::npos) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (url.front() == '.' || url.back() == '.') {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
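To make the acceptance rules above concrete, here is a minimal standalone sketch that re-implements the same checks (same regex, same `..` and leading/trailing-dot guards) against a few sample hostnames. It is a demonstration rewrite, not the `ServerManager` method itself, and the sample inputs are arbitrary.

```cpp
#include <iostream>
#include <regex>
#include <string>

// Demonstration-only mirror of ServerManager::validateServerUrl.
static bool looksLikeValidHost(const std::string& url) {
    if (url.empty() || url.length() > 253) return false;        // DNS name length limit
    static const std::regex pattern(R"(^[a-zA-Z0-9]([a-zA-Z0-9\-\.]*[a-zA-Z0-9])?$)");
    if (!std::regex_match(url, pattern)) return false;          // hostname/IP characters only
    if (url.find("..") != std::string::npos) return false;      // no empty labels
    if (url.front() == '.' || url.back() == '.') return false;  // no leading/trailing dot
    return true;
}

int main() {
    for (const char* candidate : {"getpkg.xyz", "sub.example.com", "192.168.1.1",
                                  "localhost", "-bad.com", "bad..example.com", "trailing."}) {
        std::cout << candidate << " -> "
                  << (looksLikeValidHost(candidate) ? "accepted" : "rejected") << "\n";
    }
    return 0;
}
```

With these rules, `getpkg.xyz`, `sub.example.com`, `192.168.1.1` and `localhost` are accepted, while `-bad.com`, `bad..example.com` and `trailing.` are rejected.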
||||
|
||||
bool ServerManager::isServerReachable(const std::string& url) const {
|
||||
try {
|
||||
std::string testUrl = "https://" + url + "/";
|
||||
|
||||
auto response = cpr::Head(cpr::Url{testUrl},
|
||||
cpr::Timeout{5000}, // 5 seconds
|
||||
cpr::VerifySsl{true});
|
||||
|
||||
// Accept any response that indicates the server is reachable
|
||||
// (200, 404, 403, etc. - as long as we get a response)
|
||||
return response.status_code > 0;
|
||||
} catch (const std::exception& e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
ServerConfig* ServerManager::findServer(const std::string& url) {
|
||||
auto it = std::find_if(servers_.begin(), servers_.end(),
|
||||
[&url](const ServerConfig& config) {
|
||||
return config.url == url;
|
||||
});
|
||||
return (it != servers_.end()) ? &(*it) : nullptr;
|
||||
}
|
||||
|
||||
const ServerConfig* ServerManager::findServer(const std::string& url) const {
|
||||
auto it = std::find_if(servers_.begin(), servers_.end(),
|
||||
[&url](const ServerConfig& config) {
|
||||
return config.url == url;
|
||||
});
|
||||
return (it != servers_.end()) ? &(*it) : nullptr;
|
||||
}
|
||||
|
||||
std::string ServerManager::getCurrentTimestamp() const {
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto time_t = std::chrono::system_clock::to_time_t(now);
|
||||
|
||||
std::stringstream ss;
|
||||
ss << std::put_time(std::gmtime(&time_t), "%Y-%m-%dT%H:%M:%SZ");
|
||||
return ss.str();
|
||||
}
|
53
getpkg/src/ServerManager.hpp
Normal file
@ -0,0 +1,53 @@
|
||||
#pragma once
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <filesystem>
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
struct ServerConfig {
|
||||
std::string url;
|
||||
std::string name;
|
||||
bool isDefault = false;
|
||||
std::string writeToken;
|
||||
std::string addedDate;
|
||||
|
||||
// JSON serialization
|
||||
nlohmann::json toJson() const;
|
||||
static ServerConfig fromJson(const nlohmann::json& j);
|
||||
};
|
||||
|
||||
class ServerManager {
|
||||
public:
|
||||
ServerManager();
|
||||
|
||||
// Server management
|
||||
bool addServer(const std::string& serverUrl, const std::string& writeToken = "");
|
||||
bool removeServer(const std::string& serverUrl);
|
||||
std::vector<std::string> getServers() const;
|
||||
std::string getDefaultServer() const;
|
||||
std::string getDefaultPublishServer() const; // First server with write token
|
||||
|
||||
// Token management
|
||||
bool setWriteToken(const std::string& serverUrl, const std::string& token);
|
||||
std::string getWriteToken(const std::string& serverUrl) const;
|
||||
bool hasWriteToken(const std::string& serverUrl) const;
|
||||
std::vector<std::string> getServersWithTokens() const;
|
||||
|
||||
// Configuration
|
||||
bool loadConfiguration();
|
||||
bool saveConfiguration();
|
||||
void ensureDefaultConfiguration();
|
||||
|
||||
// Migration
|
||||
bool migrateFromLegacy();
|
||||
|
||||
private:
|
||||
std::vector<ServerConfig> servers_;
|
||||
std::filesystem::path configPath_;
|
||||
|
||||
bool validateServerUrl(const std::string& url) const;
|
||||
bool isServerReachable(const std::string& url) const;
|
||||
ServerConfig* findServer(const std::string& url);
|
||||
const ServerConfig* findServer(const std::string& url) const;
|
||||
std::string getCurrentTimestamp() const;
|
||||
};
|
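Based on the public interface declared above, a caller (for example the CLI command handlers) would drive `ServerManager` roughly as in the sketch below. This is a usage sketch against the declared API only; `packages.example.com` and the token value are placeholder names, and the exact behaviour of each call is defined by the implementation in ServerManager.cpp.

```cpp
#include <iostream>
#include "ServerManager.hpp"

int main() {
    ServerManager manager;
    manager.loadConfiguration();   // falls back to the getpkg.xyz default if no config exists
    manager.migrateFromLegacy();   // imports ~/.config/getpkg.xyz/write_token.txt if present

    // Hypothetical additional registry, for illustration only.
    if (manager.addServer("packages.example.com")) {
        manager.setWriteToken("packages.example.com", "example-write-token");
    }

    std::cout << "default server: " << manager.getDefaultServer() << "\n";
    for (const auto& url : manager.getServers()) {
        std::cout << "  " << url
                  << (manager.hasWriteToken(url) ? " (publishable)" : "") << "\n";
    }
    return 0;
}
```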
File diff suppressed because it is too large
@ -1 +0,0 @@
|
||||
test
|
@ -1,7 +0,0 @@
|
||||
#!/bin/bash
|
||||
if [ "$1" = "version" ]; then
|
||||
echo "1.0.0"
|
||||
elif [ "$1" = "autocomplete" ]; then
|
||||
echo "help"
|
||||
echo "version"
|
||||
fi
|
@ -1,7 +0,0 @@
|
||||
#!/bin/bash
|
||||
if [ "$1" = "version" ]; then
|
||||
echo "1.0.0"
|
||||
elif [ "$1" = "autocomplete" ]; then
|
||||
echo "help"
|
||||
echo "version"
|
||||
fi
|
149
getpkg/test.sh
@ -68,6 +68,28 @@ cleanup() {
|
||||
# Clean up noarch variant
|
||||
$GETPKG unpublish "${TEST_TOOL_NAME}-noarch:universal" 2>/dev/null || true
|
||||
|
||||
# Clean up any remaining test packages that start with "test-"
|
||||
echo "Cleaning up any remaining test packages..."
|
||||
DIR_RESPONSE=$(curl -s "https://getpkg.xyz/dir" 2>/dev/null || echo "")
|
||||
if [ -n "$DIR_RESPONSE" ]; then
|
||||
# Extract test package labeltags from JSON response
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | jq -r '.entries[]?.labeltags[]? // empty' 2>/dev/null | grep "^test-" | sort -u || echo "")
|
||||
else
|
||||
# Fallback: extract labeltags using grep and sed
|
||||
TEST_PACKAGES=$(echo "$DIR_RESPONSE" | grep -o '"test-[^"]*"' | sed 's/"//g' | sort -u || echo "")
|
||||
fi
|
||||
|
||||
if [ -n "$TEST_PACKAGES" ]; then
|
||||
echo "$TEST_PACKAGES" | while read -r package; do
|
||||
if [ -n "$package" ]; then
|
||||
echo " Cleaning up orphaned test package: $package"
|
||||
$GETPKG unpublish "$package" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
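The cleanup above relies on the `/dir` endpoint returning JSON shaped roughly like the snippet below — an `entries` array whose items carry a `labeltags` list — because that is the structure the jq filter `.entries[]?.labeltags[]?` (and the grep fallback) walks. The concrete package names are illustrative, not real server output.

```json
{
  "entries": [
    { "labeltags": ["test-12345:x86_64", "test-12345:universal"] },
    { "labeltags": ["whatsdirty:universal"] }
  ]
}
```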
||||
|
||||
echo "Cleaned up test tools from getpkg.xyz"
|
||||
else
|
||||
echo "Note: SOS_WRITE_TOKEN not set, cannot clean up remote test objects"
|
||||
@ -455,12 +477,13 @@ EOF
|
||||
CONFIG_EXISTS=false
|
||||
TOOL_DIR_EXISTS=false
|
||||
SYMLINK_EXISTS=false
|
||||
HELPER_SYMLINK_EXISTS=false
|
||||
# HELPER_SYMLINK_EXISTS=false
|
||||
|
||||
[ -f ~/.config/getpkg/"${TEST_UNINSTALL_TOOL}.json" ] && CONFIG_EXISTS=true
|
||||
[ -d ~/.getpkg/"$TEST_UNINSTALL_TOOL" ] && TOOL_DIR_EXISTS=true
|
||||
[ -L ~/.local/bin/getpkg/"$TEST_UNINSTALL_TOOL" ] && SYMLINK_EXISTS=true
|
||||
[ -L ~/.local/bin/getpkg/"${TEST_UNINSTALL_TOOL}-helper" ] && HELPER_SYMLINK_EXISTS=true
|
||||
# Check if helper symlink exists (not currently used in validation)
|
||||
# [ -L ~/.local/bin/getpkg/"${TEST_UNINSTALL_TOOL}-helper" ] && HELPER_SYMLINK_EXISTS=true
|
||||
|
||||
if $CONFIG_EXISTS && $TOOL_DIR_EXISTS && $SYMLINK_EXISTS; then
|
||||
# Now uninstall
|
||||
@ -528,6 +551,128 @@ EOF
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Test 13.5: Comprehensive unpublish functionality
|
||||
echo -e "\nTest 13.5: Comprehensive unpublish functionality"
|
||||
|
||||
# Only run unpublish tests if SOS_WRITE_TOKEN is available
|
||||
if [ -n "${SOS_WRITE_TOKEN:-}" ]; then
|
||||
# Create unique test names for unpublish tests
|
||||
UNPUBLISH_TOOL_BASE="test-unpublish-$RANDOM"
|
||||
UNPUBLISH_TOOL_MULTI="${UNPUBLISH_TOOL_BASE}-multi"
|
||||
UNPUBLISH_TOOL_CUSTOM="${UNPUBLISH_TOOL_BASE}-custom"
|
||||
UNPUBLISH_TEST_DIR="${TEST_DIR}/unpublish_tests"
|
||||
|
||||
# Create test directory structure
|
||||
mkdir -p "$UNPUBLISH_TEST_DIR"
|
||||
|
||||
# Test 13.5a: Create and publish tool with multiple architectures
|
||||
echo "Test 13.5a: Unpublish tool with multiple architectures"
|
||||
echo '#!/bin/bash
|
||||
echo "Multi-arch unpublish test"' > "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_MULTI"
|
||||
chmod +x "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_MULTI"
|
||||
|
||||
# Publish to multiple architectures
|
||||
PUBLISH_x86_64_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_MULTI}:x86_64" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
PUBLISH_aarch64_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_MULTI}:aarch64" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
PUBLISH_universal_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_MULTI}:universal" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
|
||||
if [[ "$PUBLISH_x86_64_OUTPUT" =~ Published! ]] && [[ "$PUBLISH_aarch64_OUTPUT" =~ Published! ]] && [[ "$PUBLISH_universal_OUTPUT" =~ Published! ]]; then
|
||||
# Test robust unpublish - should remove ALL architectures
|
||||
sleep 1 # Give server time to process all publishes
|
||||
UNPUBLISH_OUTPUT=$("$GETPKG" unpublish "$UNPUBLISH_TOOL_MULTI" 2>&1)
|
||||
UNPUBLISH_EXIT_CODE=$?
|
||||
|
||||
# Check that unpublish found and removed packages
|
||||
if [ $UNPUBLISH_EXIT_CODE -eq 0 ] && [[ "$UNPUBLISH_OUTPUT" =~ "Found" ]] && [[ "$UNPUBLISH_OUTPUT" =~ "Successfully unpublished" ]]; then
|
||||
print_test_result "Unpublish removes all architectures" 0
|
||||
else
|
||||
print_test_result "Unpublish removes all architectures" 1
|
||||
echo " Unpublish failed: $UNPUBLISH_OUTPUT"
|
||||
fi
|
||||
else
|
||||
print_test_result "Unpublish removes all architectures" 1
|
||||
echo " Failed to publish test tool to multiple architectures"
|
||||
echo " x86_64: $PUBLISH_x86_64_OUTPUT"
|
||||
echo " aarch64: $PUBLISH_aarch64_OUTPUT"
|
||||
echo " universal: $PUBLISH_universal_OUTPUT"
|
||||
fi
|
||||
|
||||
# Test 13.5b: Unpublish tool with universal architecture
|
||||
echo "Test 13.5b: Unpublish tool with universal architecture"
|
||||
echo '#!/bin/bash
|
||||
echo "Universal arch unpublish test"' > "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_CUSTOM"
|
||||
chmod +x "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_CUSTOM"
|
||||
|
||||
# Publish with universal architecture
|
||||
PUBLISH_CUSTOM_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_CUSTOM}:universal" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
|
||||
if [[ "$PUBLISH_CUSTOM_OUTPUT" =~ Published! ]]; then
|
||||
# Test that unpublish can find and remove custom tags
|
||||
UNPUBLISH_CUSTOM_OUTPUT=$("$GETPKG" unpublish "$UNPUBLISH_TOOL_CUSTOM" 2>&1)
|
||||
UNPUBLISH_CUSTOM_EXIT_CODE=$?
|
||||
|
||||
if [ $UNPUBLISH_CUSTOM_EXIT_CODE -eq 0 ] && [[ "$UNPUBLISH_CUSTOM_OUTPUT" =~ Found\ ${UNPUBLISH_TOOL_CUSTOM}:universal ]]; then
|
||||
print_test_result "Unpublish finds universal architecture" 0
|
||||
else
|
||||
print_test_result "Unpublish finds universal architecture" 1
|
||||
echo " Failed to find or unpublish custom tag: $UNPUBLISH_CUSTOM_OUTPUT"
|
||||
fi
|
||||
else
|
||||
print_test_result "Unpublish finds universal architecture" 1
|
||||
echo " Failed to publish tool with custom tag: $PUBLISH_CUSTOM_OUTPUT"
|
||||
fi
|
||||
|
||||
# Test 13.5c: Unpublish non-existent tool
|
||||
echo "Test 13.5c: Unpublish non-existent tool"
|
||||
NON_EXISTENT_TOOL="non-existent-tool-$RANDOM"
|
||||
UNPUBLISH_MISSING_OUTPUT=$("$GETPKG" unpublish "$NON_EXISTENT_TOOL" 2>&1)
|
||||
UNPUBLISH_MISSING_EXIT_CODE=$?
|
||||
|
||||
if [ $UNPUBLISH_MISSING_EXIT_CODE -ne 0 ] && [[ "$UNPUBLISH_MISSING_OUTPUT" =~ "No packages found" ]]; then
|
||||
print_test_result "Unpublish handles missing tools gracefully" 0
|
||||
else
|
||||
print_test_result "Unpublish handles missing tools gracefully" 1
|
||||
echo " Expected failure for non-existent tool, got: $UNPUBLISH_MISSING_OUTPUT"
|
||||
fi
|
||||
|
||||
# Test 13.5d: Unpublish by hash
|
||||
echo "Test 13.5d: Unpublish by hash"
|
||||
UNPUBLISH_TOOL_HASH="${UNPUBLISH_TOOL_BASE}-hash"
|
||||
echo '#!/bin/bash
|
||||
echo "Hash unpublish test"' > "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_HASH"
|
||||
chmod +x "$UNPUBLISH_TEST_DIR/$UNPUBLISH_TOOL_HASH"
|
||||
|
||||
PUBLISH_HASH_OUTPUT=$("$GETPKG" publish "${UNPUBLISH_TOOL_HASH}:x86_64" "$UNPUBLISH_TEST_DIR" 2>&1)
|
||||
|
||||
if [[ "$PUBLISH_HASH_OUTPUT" =~ Hash:\ ([0-9]+) ]]; then
|
||||
EXTRACTED_HASH="${BASH_REMATCH[1]}"
|
||||
|
||||
# Test unpublish by hash
|
||||
UNPUBLISH_HASH_OUTPUT=$("$GETPKG" unpublish "$EXTRACTED_HASH" 2>&1)
|
||||
UNPUBLISH_HASH_EXIT_CODE=$?
|
||||
|
||||
if [ $UNPUBLISH_HASH_EXIT_CODE -eq 0 ] && [[ "$UNPUBLISH_HASH_OUTPUT" =~ "Successfully unpublished hash" ]]; then
|
||||
print_test_result "Unpublish by hash works" 0
|
||||
else
|
||||
print_test_result "Unpublish by hash works" 1
|
||||
echo " Failed to unpublish by hash: $UNPUBLISH_HASH_OUTPUT"
|
||||
fi
|
||||
else
|
||||
print_test_result "Unpublish by hash works" 1
|
||||
echo " Could not extract hash from publish output"
|
||||
fi
|
||||
|
||||
# Cleanup unpublish test directory
|
||||
rm -rf "$UNPUBLISH_TEST_DIR"
|
||||
|
||||
else
|
||||
echo " Skipping unpublish tests (SOS_WRITE_TOKEN not set)"
|
||||
print_test_result "Unpublish removes all architectures" 0 # Pass as skipped
|
||||
print_test_result "Unpublish finds universal architecture" 0
|
||||
print_test_result "Unpublish handles missing tools gracefully" 0
|
||||
print_test_result "Unpublish by hash works" 0
|
||||
fi
|
||||
# Test 14: Invalid tool name validation
|
||||
echo -e "\nTest 14: Invalid tool name validation"
|
||||
INVALID_OUTPUT=$(timeout 3 "$GETPKG" install "../evil-tool" 2>&1)
|
||||
|
202
gp/gp
@ -49,27 +49,43 @@ EOF
|
||||
|
||||
# Function to generate commit message based on changes
|
||||
generate_commit_message() {
|
||||
local files_changed
|
||||
files_changed=$(git diff --cached --name-only)
|
||||
local files_count
|
||||
files_count=$(echo "$files_changed" | wc -l)
|
||||
|
||||
if [ -z "$files_changed" ]; then
|
||||
files_changed=$(git diff --name-only)
|
||||
files_count=$(echo "$files_changed" | wc -l)
|
||||
# First check if we have staged changes
|
||||
local has_staged_changes=false
|
||||
if ! git diff --cached --quiet; then
|
||||
has_staged_changes=true
|
||||
fi
|
||||
|
||||
# If add-all is enabled, also include untracked files
|
||||
if [ "$ADD_ALL" = true ] && [ -z "$files_changed" ]; then
|
||||
files_changed=$(git ls-files --others --exclude-standard)
|
||||
files_count=$(echo "$files_changed" | wc -l)
|
||||
# Determine which changes to analyze based on staging status and ADD_ALL setting
|
||||
local status_command=""
|
||||
if [ "$has_staged_changes" = true ]; then
|
||||
status_command="git diff --cached --name-status"
|
||||
else
|
||||
status_command="git diff --name-status"
|
||||
fi
|
||||
|
||||
if [ -z "$files_changed" ]; then
|
||||
# Get all changes (staged or unstaged depending on context)
|
||||
local all_changes
|
||||
all_changes=$($status_command)
|
||||
|
||||
# If no changes from diff, check for untracked files when add-all is enabled
|
||||
if [ -z "$all_changes" ] && [ "$ADD_ALL" = true ]; then
|
||||
local untracked_files
|
||||
untracked_files=$(git ls-files --others --exclude-standard)
|
||||
if [ -n "$untracked_files" ]; then
|
||||
# Convert untracked files to "A" (added) status format
|
||||
all_changes=$(echo "$untracked_files" | sed 's/^/A\t/')
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$all_changes" ]; then
|
||||
echo "No changes to commit"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Count total files
|
||||
local files_count
|
||||
files_count=$(echo "$all_changes" | wc -l)
|
||||
|
||||
# Generate smart commit message based on file types and changes
|
||||
local has_source_files=false
|
||||
local has_config_files=false
|
||||
@ -77,7 +93,8 @@ generate_commit_message() {
|
||||
local has_tests=false
|
||||
local message=""
|
||||
|
||||
while IFS= read -r file; do
|
||||
# Extract just the filenames for type detection
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$file" ] && continue
|
||||
|
||||
case "$file" in
|
||||
@ -94,15 +111,18 @@ generate_commit_message() {
|
||||
has_tests=true
|
||||
;;
|
||||
esac
|
||||
done <<< "$files_changed"
|
||||
done <<< "$all_changes"
|
||||
|
||||
# Create descriptive commit message
|
||||
if [ "$files_count" -eq 1 ]; then
|
||||
local change_line
|
||||
change_line=$(echo "$all_changes" | head -1)
|
||||
local status
|
||||
local single_file
|
||||
single_file=$(echo "$files_changed" | head -1)
|
||||
local change_type
|
||||
change_type=$(git diff --cached --name-status -- "$single_file" 2>/dev/null || git diff --name-status -- "$single_file")
|
||||
case "${change_type:0:1}" in
|
||||
status=$(echo "$change_line" | cut -f1)
|
||||
single_file=$(echo "$change_line" | cut -f2)
|
||||
|
||||
case "${status:0:1}" in
|
||||
A) message="Add $single_file" ;;
|
||||
M) message="Update $single_file" ;;
|
||||
D) message="Remove $single_file" ;;
|
||||
@ -110,6 +130,58 @@ generate_commit_message() {
|
||||
*) message="Modify $single_file" ;;
|
||||
esac
|
||||
else
|
||||
# For multiple files, analyze the types of changes
|
||||
local added_count=0
|
||||
local modified_count=0
|
||||
local deleted_count=0
|
||||
local renamed_count=0
|
||||
|
||||
# Use the all_changes variable we already have
|
||||
|
||||
# Count different types of changes
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$status" ] && continue
|
||||
case "${status:0:1}" in
|
||||
A) ((added_count++)) ;;
|
||||
M) ((modified_count++)) ;;
|
||||
D) ((deleted_count++)) ;;
|
||||
R) ((renamed_count++)) ;;
|
||||
esac
|
||||
done <<< "$all_changes"
|
||||
|
||||
# Also count untracked files if add-all is enabled
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
local untracked_files
|
||||
untracked_files=$(git ls-files --others --exclude-standard)
|
||||
if [ -n "$untracked_files" ]; then
|
||||
local untracked_count
|
||||
untracked_count=$(echo "$untracked_files" | wc -l)
|
||||
((added_count += untracked_count))
|
||||
fi
|
||||
fi
|
||||
|
||||
# Generate message based on change types
|
||||
local change_parts=()
|
||||
[ $added_count -gt 0 ] && change_parts+=("add $added_count")
|
||||
[ $modified_count -gt 0 ] && change_parts+=("update $modified_count")
|
||||
[ $deleted_count -gt 0 ] && change_parts+=("remove $deleted_count")
|
||||
[ $renamed_count -gt 0 ] && change_parts+=("rename $renamed_count")
|
||||
|
||||
local change_desc=""
|
||||
if [ ${#change_parts[@]} -eq 1 ]; then
|
||||
change_desc="${change_parts[0]}"
|
||||
elif [ ${#change_parts[@]} -eq 2 ]; then
|
||||
change_desc="${change_parts[0]} and ${change_parts[1]}"
|
||||
else
|
||||
# Join all but last with commas, last with "and"
|
||||
local last_idx=$((${#change_parts[@]} - 1))
|
||||
for i in $(seq 0 $((last_idx - 1))); do
|
||||
[ $i -gt 0 ] && change_desc+=", "
|
||||
change_desc+="${change_parts[i]}"
|
||||
done
|
||||
change_desc+=" and ${change_parts[last_idx]}"
|
||||
fi
|
||||
|
||||
local prefix=""
|
||||
if $has_tests; then
|
||||
prefix="test: "
|
||||
@ -121,18 +193,32 @@ generate_commit_message() {
|
||||
prefix="feat: "
|
||||
fi
|
||||
|
||||
message="${prefix}Update $files_count files"
|
||||
# Capitalize first letter of change description
|
||||
change_desc="$(echo "${change_desc:0:1}" | tr '[:lower:]' '[:upper:]')${change_desc:1}"
|
||||
|
||||
message="${prefix}${change_desc} files"
|
||||
fi
|
||||
|
||||
echo "$message"
|
||||
}
|
||||
|
||||
# Function to check if we're in a git repository
|
||||
# Function to check if we're in a git repository and change to repo root
|
||||
check_git_repo() {
|
||||
if ! git rev-parse --git-dir >/dev/null 2>&1; then
|
||||
print_error "Not in a git repository"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Change to the git repository root to ensure we operate on the entire repo
|
||||
local git_root
|
||||
git_root=$(git rev-parse --show-toplevel)
|
||||
if [ "$PWD" != "$git_root" ]; then
|
||||
print_info "Changing to git repository root: $git_root"
|
||||
cd "$git_root" || {
|
||||
print_error "Failed to change to git repository root"
|
||||
exit 1
|
||||
}
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to check for uncommitted changes and unpushed commits
|
||||
@ -225,19 +311,77 @@ show_status_and_confirm() {
|
||||
|
||||
# Show staged changes
|
||||
if ! git diff --cached --quiet; then
|
||||
print_info "Staged changes:"
|
||||
git diff --cached --name-only -- | while IFS= read -r line; do echo " $line"; done
|
||||
local staged_modified=""
|
||||
local staged_deleted=""
|
||||
local staged_added=""
|
||||
|
||||
# Get staged file status and categorize
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$status" ] && continue
|
||||
case "${status:0:1}" in
|
||||
A) staged_added="${staged_added}${file}\n" ;;
|
||||
M) staged_modified="${staged_modified}${file}\n" ;;
|
||||
D) staged_deleted="${staged_deleted}${file}\n" ;;
|
||||
*) staged_modified="${staged_modified}${file}\n" ;; # Default to modified for other statuses
|
||||
esac
|
||||
done < <(git diff --cached --name-status)
|
||||
|
||||
# Show staged added files
|
||||
if [ -n "$staged_added" ]; then
|
||||
print_info "Staged new files:"
|
||||
echo -e "$staged_added" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
# Show staged modified files
|
||||
if [ -n "$staged_modified" ]; then
|
||||
print_info "Staged modified files:"
|
||||
echo -e "$staged_modified" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
# Show staged deleted files
|
||||
if [ -n "$staged_deleted" ]; then
|
||||
print_info "Staged deleted files:"
|
||||
echo -e "$staged_deleted" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
has_staged_changes=true
|
||||
fi
|
||||
|
||||
# Show unstaged changes
|
||||
if ! git diff --quiet; then
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
print_info "Modified files (will be added):"
|
||||
else
|
||||
print_info "Modified files (unstaged, will NOT be included):"
|
||||
local modified_files=""
|
||||
local deleted_files=""
|
||||
|
||||
# Get file status and categorize
|
||||
while IFS=$'\t' read -r status file; do
|
||||
[ -z "$status" ] && continue
|
||||
case "${status:0:1}" in
|
||||
M) modified_files="${modified_files}${file}\n" ;;
|
||||
D) deleted_files="${deleted_files}${file}\n" ;;
|
||||
*) modified_files="${modified_files}${file}\n" ;; # Default to modified for other statuses
|
||||
esac
|
||||
done < <(git diff --name-status)
|
||||
|
||||
# Show modified files
|
||||
if [ -n "$modified_files" ]; then
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
print_info "Modified files (will be added):"
|
||||
else
|
||||
print_info "Modified files (unstaged, will NOT be included):"
|
||||
fi
|
||||
echo -e "$modified_files" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
git diff --name-only -- | while IFS= read -r line; do echo " $line"; done
|
||||
|
||||
# Show deleted files
|
||||
if [ -n "$deleted_files" ]; then
|
||||
if [ "$ADD_ALL" = true ]; then
|
||||
print_info "Deleted files (will be removed):"
|
||||
else
|
||||
print_info "Deleted files (unstaged, will NOT be included):"
|
||||
fi
|
||||
echo -e "$deleted_files" | grep -v '^$' | while IFS= read -r line; do echo " $line"; done
|
||||
fi
|
||||
|
||||
has_unstaged_changes=true
|
||||
fi
|
||||
|
||||
@ -350,7 +494,7 @@ case "${1:-}" in
|
||||
exit 0
|
||||
;;
|
||||
version)
|
||||
echo "gp version 2.0.0"
|
||||
echo "2.0.1"
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
|
20
sos/clean.sh
Executable file
@ -0,0 +1,20 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||
PROJECT="sos"
|
||||
|
||||
echo "Cleaning ${PROJECT}..."
|
||||
|
||||
# Remove output directory (if it exists)
|
||||
if [ -d "${SCRIPT_DIR}/output" ]; then
|
||||
echo "Removing output directory..."
|
||||
rm -rf "${SCRIPT_DIR}/output"
|
||||
fi
|
||||
|
||||
# Remove any temporary files
|
||||
echo "Removing temporary files..."
|
||||
find "${SCRIPT_DIR}" -name "*.tmp" -o -name "*.temp" -o -name "*~" | xargs -r rm -f
|
||||
|
||||
echo "✓ ${PROJECT} cleaned successfully"
|
@ -25,6 +25,7 @@ GETPKG="${SCRIPT_DIR}/../getpkg/output/getpkg"
|
||||
TOOLDIR="${SCRIPT_DIR}/tool"
|
||||
mkdir -p "${TOOLDIR}"
|
||||
cp "${SCRIPT_DIR}/whatsdirty" "${TOOLDIR}/whatsdirty"
|
||||
cp "${SCRIPT_DIR}/setup_script.sh" "${TOOLDIR}/"
|
||||
|
||||
# publish universal tool.
|
||||
"${GETPKG}" publish "whatsdirty" "${TOOLDIR}"
|
||||
|