This commit is contained in:
yumoqing 2026-02-14 15:27:27 +08:00
parent 61f09d3691
commit ba1d128f1a
35 changed files with 8 additions and 1811 deletions

View File

@ -110,26 +110,26 @@ class IndustrialSkillEngine:
name = meta.get('name')
# 检查依赖 (Pre-flight check)
req_file = skill_md.parent / "requirements.txt"
has_deps = req_file.exists()
init_file = skill_md.parent / "init.sh"
need_init = init_file.exists()
self.registry[name] = DictObject(**{
"root": skill_md.parent,
"meta": meta,
"content": content,
"has_deps": has_deps
"need_init": need_init,
"content": content
})
await env.session_setvalue(key, self.registry)
# --- 2. 自动化依赖环境隔离 (venv 思想) ---
async def _ensure_dependencies(self, skill_name: str):
skill = self.registry[skill_name]
if skill["has_deps"]:
if skill["need_init"]:
# 工业级引擎通常会检查一个隐藏的 .installed 标识
if not (skill["root"] / ".deps_installed").exists():
if not (skill["root"] / ".skill-inited").exists():
print(f"📦 正在为技能 {skill_name} 安装必要依赖...")
await run_subprocess(["pip", "install", "-r", "requirements.txt"], cwd=skill["root"])
(skill["root"] / ".deps_installed").touch()
await run_subprocess("./init.sh", cwd=skill["root"])
(skill["root"] / ".skill-inited").touch()
# --- 3. 增强版安全执行器:带重试逻辑与审计 ---
async def _execute_with_retry(self, cmdjson: dict, skill_name: str, retry_count=1) -> str:

View File

@ -1,52 +0,0 @@
---
name: calculator
description: "Perform basic arithmetic operations: addition, subtraction, multiplication, and division"
---
# calculator
Perform basic arithmetic operations: addition, subtraction, multiplication, and division.
## Input
The input is a JSON object provided via standard input:
```json
{
"a": 10,
"op": "+",
"b": 5
}
```
### Parameters
- `a` (number, required): First operand
- `op` (string, required): Operator, one of `+`, `-`, `*`, `/`
- `b` (number, required): Second operand
## Output
Returns a JSON object to standard output:
```json
{
"result": 15
}
```
## Errors
On failure, returns a JSON object:
```json
{
"error": "division by zero"
}
```
## Notes
- This skill is non-interactive.
- Output is always valid JSON.
- No output is written to stderr.

View File

@ -1,31 +0,0 @@
#!/usr/bin/env bash
# calculator skill: reads a JSON object {"a": <num>, "op": "+|-|*|/", "b": <num>}
# from stdin and writes exactly one JSON object to stdout:
#   {"result": <num>}  on success, or  {"error": "<message>"}  on failure.
#
# FIX: removed the stray `echo "$INPUT"` that echoed the raw request before
# the result — it made the script emit two lines, so the output was no
# longer a single valid JSON document.
read -r INPUT

# Emit a JSON error object and stop; exit 0 so callers always get valid JSON.
error() {
  echo "{\"error\":\"$1\"}"
  exit 0
}

# Extract fields with jq; '// empty' maps missing/null values to "".
A=$(echo "$INPUT" | jq -r '.a // empty')
OP=$(echo "$INPUT" | jq -r '.op // empty')
B=$(echo "$INPUT" | jq -r '.b // empty')
[[ -z "$A" || -z "$OP" || -z "$B" ]] && error "missing parameter"

# Accept integers and decimals, with an optional leading minus sign.
is_number='^-?[0-9]+([.][0-9]+)?$'
[[ ! "$A" =~ $is_number ]] && error "a is not a number"
[[ ! "$B" =~ $is_number ]] && error "b is not a number"

case "$OP" in
  "+") RESULT=$(echo "$A + $B" | bc) ;;
  "-") RESULT=$(echo "$A - $B" | bc) ;;
  "*") RESULT=$(echo "$A * $B" | bc) ;;
  "/")
    # Let bc do the zero test so "0.0" and "0" are both caught.
    [[ "$(echo "$B == 0" | bc)" -eq 1 ]] && error "division by zero"
    RESULT=$(echo "scale=10; $A / $B" | bc)
    ;;
  *) error "unsupported operator" ;;
esac
echo "{\"result\": $RESULT}"

View File

@ -1,18 +0,0 @@
scripts:
run:
description: 四则运算
inputs:
a:
type: int or float
required: true
description: 计算左值
op:
type: str
required: true
description: 计算方法
enum: ['+', '-', '*', '/']
b:
type: int or float
required: true
description: 计算右值

View File

@ -1,37 +0,0 @@
---
name: code-reviewer
description: Review code for best practices, potential bugs, security vulnerabilities, and performance issues
allowed-tools: Read, Grep
---
# Code Reviewer Skill
You are an experienced code reviewer. Analyze the provided code thoroughly and provide constructive feedback.
## Review Areas
1. **Best Practices**: Check adherence to language-specific conventions
2. **Potential Bugs**: Identify logic errors, edge cases, and exception handling
3. **Security**: Look for common vulnerabilities (SQL injection, XSS, etc.)
4. **Performance**: Spot inefficient algorithms or resource usage
5. **Readability**: Assess code clarity, naming, and documentation
## Instructions
$ARGUMENTS
## Output Format
Provide your review in the following structure:
### Summary
Brief overview of the code quality
### Issues Found
List specific issues with severity (Critical/High/Medium/Low) and location
### Recommendations
Actionable suggestions for improvement
### Positive Aspects
Highlight what was done well

View File

@ -1,32 +0,0 @@
#!/usr/bin/env bash
# calculator skill: reads {"a": <num>, "op": "+|-|*|/", "b": <num>} as JSON
# on stdin and prints {"result": <num>} (or {"error": "<msg>"}) on stdout.
set -euo pipefail
read -r INPUT
# Emit a JSON error object and stop; exit 0 so the caller always receives
# a well-formed JSON response even for bad input.
error() {
echo "{\"error\":\"$1\"}"
exit 0
}
# Extract fields with jq; '// empty' maps missing/null values to "".
A=$(echo "$INPUT" | jq -r '.a // empty')
OP=$(echo "$INPUT" | jq -r '.op // empty')
B=$(echo "$INPUT" | jq -r '.b // empty')
[[ -z "$A" || -z "$OP" || -z "$B" ]] && error "missing parameter"
# Accept integers and decimals, with an optional leading minus sign.
is_number='^-?[0-9]+([.][0-9]+)?$'
[[ ! "$A" =~ $is_number ]] && error "a is not a number"
[[ ! "$B" =~ $is_number ]] && error "b is not a number"
case "$OP" in
"+") RESULT=$(echo "$A + $B" | bc) ;;
"-") RESULT=$(echo "$A - $B" | bc) ;;
"*") RESULT=$(echo "$A * $B" | bc) ;;
"/")
# Let bc do the zero test so "0.0" and "0" are both caught.
[[ "$(echo "$B == 0" | bc)" -eq 1 ]] && error "division by zero"
RESULT=$(echo "scale=10; $A / $B" | bc)
;;
*) error "unsupported operator" ;;
esac
echo "{\"result\": $RESULT}"

View File

@ -1,18 +0,0 @@
{
"manifest_version": "0.1",
"name": "example-plugin",
"version": "1.0.0",
"description": "Example plugin demonstrating plugin manifest structure with multiple skill directories",
"author": {
"name": "Skillkit Team",
"email": "team@skillkit.example.com",
"url": "https://github.com/skillkit/example-plugin"
},
"skills": ["skills/"],
"display_name": "Example Plugin",
"homepage": "https://github.com/skillkit/example-plugin",
"repository": {
"type": "git",
"url": "https://github.com/skillkit/example-plugin"
}
}

View File

@ -1,38 +0,0 @@
---
name: csv-parser
description: Parse and analyze CSV files with data validation
---
# CSV Parser Skill
You are a CSV file analysis assistant from the example-plugin.
## Capabilities
- Parse CSV files with various delimiters
- Validate data types and constraints
- Generate summary statistics
- Detect encoding issues
- Handle malformed data gracefully
## Usage
To analyze a CSV file, provide the file path as an argument:
```
Arguments: $ARGUMENTS
```
## Output Format
The analysis will include:
- Row count and column count
- Column names and inferred data types
- Missing value report
- Basic statistics for numeric columns
- Encoding and delimiter detection results
## Example
Input: data.csv
Output: Analysis report with statistics and validation results

View File

@ -1,37 +0,0 @@
---
name: json-parser
description: Parse and validate JSON data with schema support
---
# JSON Parser Skill
You are a JSON data validation assistant from the example-plugin.
## Capabilities
- Parse and pretty-print JSON data
- Validate against JSON Schema
- Detect malformed JSON
- Extract specific fields using JSONPath
- Convert to other formats (CSV, YAML)
## Usage
To parse JSON data, provide the file path or raw JSON as an argument:
```
Arguments: $ARGUMENTS
```
## Output Format
The parser will provide:
- Validation status (valid/invalid)
- Structure overview (depth, object count)
- Schema compliance report (if schema provided)
- Extracted values (if JSONPath provided)
## Example
Input: config.json
Output: Validated JSON structure with compliance report

View File

@ -1,68 +0,0 @@
---
name: file-reference-skill
description: Example skill demonstrating secure file reference resolution with supporting files
allowed-tools: []
---
# File Reference Skill
This skill demonstrates how to use supporting files (scripts, templates, documentation) within a skill directory.
## Overview
This skill uses helper scripts and templates for data processing. All supporting files are accessible via relative paths from the skill's base directory.
## Available Supporting Files
### Scripts
- `scripts/data_processor.py` - Main data processing script
- `scripts/validator.py` - Input validation utilities
- `scripts/helper.sh` - Shell helper script
### Templates
- `templates/config.yaml` - Configuration template
- `templates/report.md` - Report generation template
### Documentation
- `docs/usage.md` - Detailed usage instructions
- `docs/examples.md` - Example use cases
## Usage
When this skill is invoked with arguments, it can access supporting files using the FilePathResolver:
```python
from pathlib import Path
from skillkit.core.path_resolver import FilePathResolver
# Get the skill's base directory (injected by BaseDirectoryProcessor)
base_dir = Path("<base_directory_from_context>")
# Resolve supporting files securely
processor_script = FilePathResolver.resolve_path(base_dir, "scripts/data_processor.py")
config_template = FilePathResolver.resolve_path(base_dir, "templates/config.yaml")
usage_docs = FilePathResolver.resolve_path(base_dir, "docs/usage.md")
# Read file contents
with open(processor_script) as f:
script_code = f.read()
```
## Processing Arguments
The skill expects data file paths as arguments:
**Example invocation**: `file-reference-skill data/input.csv data/output.csv`
Processing steps:
1. Validate input using `scripts/validator.py`
2. Process data using `scripts/data_processor.py`
3. Generate report using `templates/report.md`
4. Output results to specified location
## Security Notes
- All file paths are validated to prevent directory traversal attacks
- Symlinks are resolved and verified to stay within skill directory
- Absolute paths and path traversal patterns (../) are blocked
- Any security violation raises PathSecurityError with detailed logging

View File

@ -1,262 +0,0 @@
# File Reference Skill - Examples
## Example 1: Simple Data Processing
Process a CSV file using the skill's data processor:
```python
from skillkit import SkillManager
from skillkit.core.path_resolver import FilePathResolver
from pathlib import Path
# Initialize skill manager
manager = SkillManager("./examples/skills")
manager.discover()
# Invoke skill
result = manager.invoke_skill(
"file-reference-skill",
"data/input.csv data/output.csv"
)
print(result)
```
## Example 2: Accessing Supporting Scripts
Read and execute supporting scripts:
```python
from pathlib import Path
from skillkit.core.path_resolver import FilePathResolver
# Get skill's base directory
skill = manager.get_skill("file-reference-skill")
base_dir = skill.base_directory
# Resolve script path securely
processor_path = FilePathResolver.resolve_path(
base_dir,
"scripts/data_processor.py"
)
# Read script content
with open(processor_path) as f:
script_code = f.read()
print(f"Script location: {processor_path}")
print(f"Script length: {len(script_code)} bytes")
```
## Example 3: Loading Configuration Template
Load and parse configuration template:
```python
import yaml
from skillkit.core.path_resolver import FilePathResolver
# Resolve config template path
config_path = FilePathResolver.resolve_path(
base_dir,
"templates/config.yaml"
)
# Load configuration
with open(config_path) as f:
config = yaml.safe_load(f)
print("Configuration:", config)
```
## Example 4: Handling Security Violations
Demonstrate path traversal prevention:
```python
from skillkit.core.path_resolver import FilePathResolver
from skillkit.core.exceptions import PathSecurityError
try:
# Attempt path traversal (will be blocked)
malicious_path = FilePathResolver.resolve_path(
base_dir,
"../../../etc/passwd"
)
except PathSecurityError as e:
print(f"Security violation blocked: {e}")
# Expected output:
# Security violation blocked: Path traversal attempt detected:
# '../../../etc/passwd' resolves outside skill directory
```
## Example 5: Validating Input Files
Use validator script to check input files:
```python
import subprocess
from skillkit.core.path_resolver import FilePathResolver
# Resolve validator script
validator_path = FilePathResolver.resolve_path(
base_dir,
"scripts/validator.py"
)
# Import and use validator
import sys
sys.path.insert(0, str(validator_path.parent))
from validator import validate_csv_format
# Validate input file
is_valid = validate_csv_format("data/input.csv")
print(f"File is valid: {is_valid}")
```
## Example 6: Generating Reports
Generate report using template:
```python
from string import Template
from datetime import datetime
from skillkit.core.path_resolver import FilePathResolver
# Resolve report template
template_path = FilePathResolver.resolve_path(
base_dir,
"templates/report.md"
)
# Load template
with open(template_path) as f:
template_content = f.read()
# Fill template with data
template = Template(template_content)
report = template.safe_substitute({
'timestamp': datetime.now().isoformat(),
'input_file': 'data/input.csv',
'input_size': '1234',
'format': 'CSV',
'encoding': 'UTF-8',
'start_time': '10:00:00',
'end_time': '10:00:05',
'duration': '5',
'status': 'SUCCESS',
'output_file': 'data/output.csv',
'output_size': '1234',
'record_count': '100',
'error_count': '0',
'validation_results': 'All checks passed',
'processing_log': 'Processing completed successfully'
})
print(report)
```
## Example 7: Shell Script Integration
Execute shell helper script:
```python
import subprocess
from skillkit.core.path_resolver import FilePathResolver
# Resolve shell script
helper_path = FilePathResolver.resolve_path(
base_dir,
"scripts/helper.sh"
)
# Execute script
result = subprocess.run(
['bash', str(helper_path), 'check'],
capture_output=True,
text=True
)
print(result.stdout)
```
## Example 8: Multiple File Access
Access multiple supporting files in one operation:
```python
from skillkit.core.path_resolver import FilePathResolver
# List of files to access
file_paths = [
"scripts/data_processor.py",
"scripts/validator.py",
"templates/config.yaml",
"docs/usage.md"
]
# Resolve all paths securely
resolved_paths = {}
for rel_path in file_paths:
try:
abs_path = FilePathResolver.resolve_path(base_dir, rel_path)
resolved_paths[rel_path] = abs_path
print(f"✓ {rel_path} -> {abs_path}")
except PathSecurityError as e:
print(f"✗ {rel_path} -> BLOCKED ({e})")
print(f"\nSuccessfully resolved {len(resolved_paths)} paths")
```
## Example 9: Error Handling Best Practices
Robust error handling when accessing supporting files:
```python
from pathlib import Path
from skillkit.core.path_resolver import FilePathResolver
from skillkit.core.exceptions import PathSecurityError
def safe_load_supporting_file(base_dir: Path, rel_path: str) -> str:
"""Safely load supporting file with comprehensive error handling."""
try:
# Resolve path securely
abs_path = FilePathResolver.resolve_path(base_dir, rel_path)
# Read file content
with open(abs_path, 'r', encoding='utf-8') as f:
return f.read()
except PathSecurityError as e:
print(f"Security violation: {e}")
raise
except FileNotFoundError:
print(f"File not found: {rel_path}")
raise
except PermissionError:
print(f"Permission denied: {rel_path}")
raise
except UnicodeDecodeError:
print(f"Invalid UTF-8 encoding: {rel_path}")
raise
except Exception as e:
print(f"Unexpected error loading {rel_path}: {e}")
raise
# Usage
try:
    content = safe_load_supporting_file(base_dir, "scripts/helper.sh")
print(f"Loaded {len(content)} bytes")
except Exception as e:
print(f"Failed to load file: {e}")
```
## Summary
These examples demonstrate:
- Secure file path resolution using FilePathResolver
- Accessing scripts, templates, and documentation
- Handling security violations gracefully
- Integration with Python and shell scripts
- Best practices for error handling
- Template-based report generation

View File

@ -1,141 +0,0 @@
# File Reference Skill - Usage Guide
## Overview
The file-reference-skill demonstrates how to structure a skill with supporting files (scripts, templates, documentation) and access them securely using the FilePathResolver.
## Directory Structure
```
file-reference-skill/
├── SKILL.md # Main skill definition
├── scripts/ # Processing scripts
│ ├── data_processor.py # Main data processor
│ ├── validator.py # Input validation
│ └── helper.sh # Shell utilities
├── templates/ # Configuration and output templates
│ ├── config.yaml # Configuration template
│ └── report.md # Report generation template
└── docs/ # Documentation
├── usage.md # This file
└── examples.md # Example use cases
```
## Using Supporting Files
### From Python
```python
from pathlib import Path
from skillkit.core.path_resolver import FilePathResolver
# Base directory is provided in the skill context
base_dir = Path("/path/to/skills/file-reference-skill")
# Resolve paths securely
processor_path = FilePathResolver.resolve_path(
base_dir,
"scripts/data_processor.py"
)
# Read file content
with open(processor_path) as f:
script_code = f.read()
```
### From Shell
```bash
# Get base directory from skill context
BASE_DIR="/path/to/skills/file-reference-skill"
# Use helper script
bash "$BASE_DIR/scripts/helper.sh" check
# Run data processor
python3 "$BASE_DIR/scripts/data_processor.py" input.csv output.csv
```
## Security Features
The FilePathResolver ensures:
1. **Path Traversal Prevention**: Blocks attempts to access files outside skill directory
2. **Symlink Validation**: Resolves symlinks and verifies targets stay within base directory
3. **Absolute Path Rejection**: Prevents absolute path injection
4. **Detailed Logging**: All security violations logged at ERROR level
### Valid Paths
```python
# Allowed - relative path within skill directory
FilePathResolver.resolve_path(base_dir, "scripts/helper.sh")
FilePathResolver.resolve_path(base_dir, "templates/config.yaml")
FilePathResolver.resolve_path(base_dir, "docs/usage.md")
```
### Invalid Paths (Blocked)
```python
# Blocked - directory traversal
FilePathResolver.resolve_path(base_dir, "../../etc/passwd")
# Blocked - absolute path
FilePathResolver.resolve_path(base_dir, "/etc/passwd")
# Blocked - symlink escape
# (if symlink target is outside base_dir)
FilePathResolver.resolve_path(base_dir, "malicious_link")
```
## Example Workflow
1. **Skill Invocation**
```python
manager = SkillManager()
manager.discover()
result = manager.invoke_skill(
"file-reference-skill",
"input_data.csv output_data.csv"
)
```
2. **Skill Processing**
- Skill receives base directory in context
- Script paths resolved using FilePathResolver
- Scripts executed with validated paths
- Results returned to caller
3. **File Access**
- All file operations use resolved paths
- Security violations raise PathSecurityError
- Detailed error messages help debugging
## Best Practices
1. **Always use FilePathResolver** for accessing supporting files
2. **Use relative paths** from skill base directory
3. **Document file dependencies** in SKILL.md
4. **Test with various path patterns** including edge cases
5. **Handle PathSecurityError** appropriately in your code
## Troubleshooting
### PathSecurityError
**Problem**: Attempting to access files outside skill directory
**Solution**: Use relative paths within skill directory only
### FileNotFoundError
**Problem**: Resolved path doesn't exist
**Solution**: Verify file exists in skill directory structure
### PermissionError
**Problem**: Cannot read resolved file
**Solution**: Check file permissions and ownership

View File

@ -1,51 +0,0 @@
"""Data processing script for file-reference-skill.
This script demonstrates how supporting files can be used within a skill.
"""
import sys
from pathlib import Path
def process_data(input_file: str, output_file: str) -> None:
"""Process data from input file and write to output file.
Args:
input_file: Path to input data file
output_file: Path to output data file
"""
print(f"Processing data from {input_file}")
print(f"Output will be written to {output_file}")
# Read input file
try:
with open(input_file, 'r') as f:
data = f.read()
print(f"Read {len(data)} bytes from input file")
except FileNotFoundError:
print(f"Error: Input file not found: {input_file}")
sys.exit(1)
# Process data (example: uppercase transformation)
processed_data = data.upper()
# Write output file
with open(output_file, 'w') as f:
f.write(processed_data)
print(f"Wrote {len(processed_data)} bytes to output file")
def main() -> None:
"""Main entry point."""
if len(sys.argv) != 3:
print("Usage: data_processor.py <input_file> <output_file>")
sys.exit(1)
input_file = sys.argv[1]
output_file = sys.argv[2]
process_data(input_file, output_file)
if __name__ == "__main__":
main()

View File

@ -1,102 +0,0 @@
#!/usr/bin/env python3
"""Environment Variable Demonstration Script

This script demonstrates how skillkit automatically injects environment
variables into script execution context.

Injected Variables:
    - SKILL_NAME: Name of the skill
    - SKILL_BASE_DIR: Absolute path to skill directory
    - SKILL_VERSION: Version from skill metadata
    - SKILLKIT_VERSION: Current skillkit version

These variables can be used for:
    - Locating files relative to skill directory
    - Including skill context in logs
    - Version-specific behavior
    - Debugging and troubleshooting

Usage:
    This script is designed to be executed by skillkit's script executor.
    It reads JSON arguments from stdin and writes results to stdout.
"""
import json
import os
import sys
from pathlib import Path


def main() -> int:
    """Demonstrate environment variable access.

    Returns:
        0 on success (used as the process exit code).
    """
    # Read arguments from stdin (standard skillkit pattern); malformed
    # input degrades to an empty argument dict rather than failing.
    try:
        args = json.load(sys.stdin)
    except json.JSONDecodeError:
        args = {}

    # Access injected environment variables, with safe fallbacks so the
    # demo also runs outside a skillkit executor.
    skill_name = os.environ.get('SKILL_NAME', 'unknown')
    skill_base = os.environ.get('SKILL_BASE_DIR', 'unknown')
    skill_version = os.environ.get('SKILL_VERSION', '0.0.0')
    skillkit_version = os.environ.get('SKILLKIT_VERSION', 'unknown')

    # Prepare structured output for programmatic consumers.
    output = {
        "message": "Environment variables successfully accessed!",
        "context": {
            "skill_name": skill_name,
            "skill_base_dir": skill_base,
            "skill_version": skill_version,
            "skillkit_version": skillkit_version
        },
        "arguments_received": args,
        "examples": {
            "relative_file_path": "Use SKILL_BASE_DIR to locate files",
            "logging": f"[{skill_name} v{skill_version}] Log message here",
            "file_resolution": str(Path(skill_base) / "data" / "config.json")
        }
    }

    # Human-readable report.
    print("=" * 60)
    print(f"Skill: {skill_name} v{skill_version}")
    print(f"Directory: {skill_base}")
    print(f"Powered by: skillkit v{skillkit_version}")
    print("=" * 60)
    print()
    print("Environment Variables:")
    print(f" SKILL_NAME = {skill_name}")
    print(f" SKILL_BASE_DIR = {skill_base}")
    print(f" SKILL_VERSION = {skill_version}")
    print(f" SKILLKIT_VERSION = {skillkit_version}")
    print()
    print("Arguments Received:")
    print(f" {json.dumps(args, indent=2)}")
    print()
    print("Example Use Cases:")
    # NOTE: the f-prefix was dropped from strings with no placeholders
    # (idiom cleanup); the emitted text is unchanged.
    print(" 1. Locate skill files:")
    print(" config_path = Path(os.environ['SKILL_BASE_DIR']) / 'config.json'")
    print(f"{Path(skill_base) / 'config.json'}")
    print()
    print(" 2. Contextual logging:")
    print(" logger.info(f'[{os.environ[\"SKILL_NAME\"]}] Processing...')")
    print(f" → [{skill_name}] Processing...")
    print()
    print(" 3. Version-specific behavior:")
    print(" if os.environ['SKILL_VERSION'] >= '2.0.0':")
    print(" use_new_api()")
    print()
    print("=" * 60)

    # Also output as JSON for programmatic use.
    print()
    print("JSON Output:")
    print(json.dumps(output, indent=2))

    # Exit successfully.
    return 0


if __name__ == "__main__":
    sys.exit(main())

View File

@ -1,39 +0,0 @@
#!/bin/bash
# Helper script for file-reference-skill
# Dispatches on the first argument (defaulting to "help") to provide small
# environment and file utilities for the skill's examples.
echo "File Reference Skill Helper Script"
echo "==================================="
echo ""
echo "This script demonstrates shell scripting support in skills."
echo ""
echo "Usage: ./helper.sh <command> [args...]"
echo ""
# ${1:-help}: fall back to the help text when no command is given.
case "${1:-help}" in
check)
# Report the interpreter and filesystem context the script runs in.
echo "Checking environment..."
echo "Python version: $(python3 --version)"
echo "Current directory: $(pwd)"
echo "Script directory: $(dirname "$0")"
;;
validate)
# Requires a file path as the second argument; exits 1 on any failure.
if [ -z "$2" ]; then
echo "Error: No file specified"
exit 1
fi
echo "Validating file: $2"
if [ -f "$2" ]; then
echo "File exists: $2"
echo "File size: $(wc -c < "$2") bytes"
else
echo "File not found: $2"
exit 1
fi
;;
help|*)
# Catch-all branch: unknown commands also print the help text.
echo "Available commands:"
echo " check - Check environment"
echo " validate <file> - Validate file exists"
echo " help - Show this help message"
;;
esac

View File

@ -1,60 +0,0 @@
"""Input validation utilities for file-reference-skill."""
from pathlib import Path
def validate_file_path(file_path: str) -> bool:
    """Validate that a file path exists and is readable.

    Args:
        file_path: Path to validate

    Returns:
        True if valid, False otherwise
    """
    candidate = Path(file_path)

    # Cheap existence/type guards first, each with a precise message.
    if not candidate.exists():
        print(f"Error: File does not exist: {file_path}")
        return False
    if not candidate.is_file():
        print(f"Error: Path is not a file: {file_path}")
        return False

    # Readability probe: try to read a single character.
    try:
        with candidate.open('r') as handle:
            handle.read(1)
    except PermissionError:
        print(f"Error: Permission denied reading file: {file_path}")
        return False
    except Exception as e:
        print(f"Error: Cannot read file: {file_path} ({e})")
        return False
    return True
def validate_csv_format(file_path: str) -> bool:
    """Validate that a file is in CSV format.

    Args:
        file_path: Path to CSV file

    Returns:
        True if valid CSV, False otherwise
    """
    # Accessibility check first (exists, is a regular file, readable).
    if not validate_file_path(file_path):
        return False

    # The extension check is advisory only — a warning, not a failure.
    if not file_path.endswith('.csv'):
        print(f"Warning: File does not have .csv extension: {file_path}")

    # Content heuristic: the first line of a CSV should contain a comma.
    with open(file_path, 'r') as handle:
        header = handle.readline()
    if ',' not in header:
        print(f"Warning: File may not be valid CSV (no commas found): {file_path}")
        return False
    return True

View File

@ -1,28 +0,0 @@
# Configuration template for file-reference-skill
# Data processing settings
processing:
input_format: csv
output_format: csv
encoding: utf-8
delimiter: ","
skip_header: false
# Validation settings
validation:
check_encoding: true
check_format: true
max_file_size_mb: 100
required_columns: []
# Output settings
output:
include_timestamp: true
compress: false
create_backup: true
# Logging settings
logging:
level: INFO
format: "%(asctime)s - %(levelname)s - %(message)s"
file: "processing.log"

View File

@ -1,39 +0,0 @@
# Data Processing Report
**Generated**: ${timestamp}
**Skill**: file-reference-skill
## Input Summary
- **Input File**: ${input_file}
- **File Size**: ${input_size} bytes
- **Format**: ${format}
- **Encoding**: ${encoding}
## Processing Summary
- **Start Time**: ${start_time}
- **End Time**: ${end_time}
- **Duration**: ${duration} seconds
- **Status**: ${status}
## Output Summary
- **Output File**: ${output_file}
- **Output Size**: ${output_size} bytes
- **Records Processed**: ${record_count}
- **Errors**: ${error_count}
## Validation Results
${validation_results}
## Processing Log
```
${processing_log}
```
---
*This report was generated by the file-reference-skill example skill.*

View File

@ -1,37 +0,0 @@
---
name: git-helper
description: Generate git commit messages and help with git workflows
allowed-tools: Bash
---
# Git Helper Skill
You are a git workflow assistant. Help users with commit messages, branch naming, and git best practices.
## Commit Message Format
Follow conventional commits specification:
- **feat**: New feature
- **fix**: Bug fix
- **docs**: Documentation changes
- **style**: Formatting, missing semicolons, etc.
- **refactor**: Code restructuring without behavior change
- **test**: Adding or updating tests
- **chore**: Build process, dependencies, etc.
Format:
```
<type>(<scope>): <subject>
<body>
<footer>
```
## Instructions
$ARGUMENTS
## Output
Provide a well-formatted commit message or git workflow guidance.

View File

@ -1,25 +0,0 @@
---
name: markdown-formatter
description: Format and clean up markdown documents following best practices
---
# Markdown Formatter Skill
You are a markdown formatting expert. Clean up and standardize markdown documents.
## Formatting Rules
1. **Headers**: Ensure proper hierarchy (single H1, incremental levels)
2. **Lists**: Consistent bullet style (- for unordered, 1. for ordered)
3. **Code Blocks**: Proper language tags for syntax highlighting
4. **Links**: Convert inline links to reference-style when repeated
5. **Spacing**: Blank lines around headers, lists, and code blocks
6. **Line Length**: Wrap lines at 80-100 characters for readability
## Instructions
$ARGUMENTS
## Output
Provide the formatted markdown with a brief summary of changes made.

View File

@ -1,13 +0,0 @@
---
name: nested-root-skill
description: Example skill at the root of the nested structure demonstrating depth 1
---
# Nested Root Skill
This skill demonstrates that skills can exist at any level of the hierarchy.
**Location**: `nested-example/SKILL.md`
**Depth**: 1 level from root (immediate subdirectory)
The nested structure supports flexible organization with skills at every level.

View File

@ -1,13 +0,0 @@
---
name: nested-mid-skill
description: Example skill demonstrating nested directory structure at depth 2
---
# Nested Mid-Level Skill
This skill demonstrates mid-level nesting in skill organization.
**Location**: `category-a/SKILL.md`
**Depth**: 2 levels from root
Skills can be organized in categories and subcategories for better organization.

View File

@ -1,13 +0,0 @@
---
name: nested-deep-skill
description: Example skill demonstrating nested directory structure at depth 3
---
# Nested Deep Skill
This skill demonstrates that skillkit can discover skills in nested subdirectories.
**Location**: `category-a/subcategory-1/SKILL.md`
**Depth**: 3 levels from root
You can organize your skills in any nested structure up to 5 levels deep.

View File

@ -1,13 +0,0 @@
---
name: nested-category-b
description: Example skill in category B demonstrating flat organization within nested structure
---
# Nested Category B Skill
This skill demonstrates that you can mix flat and nested structures.
**Location**: `category-b/SKILL.md`
**Depth**: 2 levels from root
Some categories may have immediate skills, while others may have deeper nesting.

View File

@ -1,77 +0,0 @@
---
name: pdf-extractor
description: Extract and convert PDF documents using Python scripts
version: 1.0.0
allowed-tools:
- Bash
- Read
- Write
---
# PDF Extractor Skill
This skill provides tools for extracting text and metadata from PDF documents and converting them to different formats.
## Available Scripts
### extract.py
Extracts text and metadata from PDF files.
**Input**:
```json
{
"file_path": "/path/to/document.pdf",
"pages": "all" | [1, 2, 3]
}
```
**Output**:
```json
{
"text": "Extracted text content...",
"metadata": {
"title": "Document Title",
"author": "Author Name",
"pages": 10
}
}
```
### convert.sh
Converts PDF files to different formats (text, markdown, etc.).
**Input**:
```json
{
"input_file": "/path/to/input.pdf",
"output_format": "txt" | "md" | "html"
}
```
### parse.py
Parses structured data from PDF forms and tables.
**Input**:
```json
{
"file_path": "/path/to/form.pdf",
"extract_tables": true,
"extract_forms": true
}
```
## Usage Example
```python
from skillkit import SkillManager
manager = SkillManager()
result = manager.execute_skill_script(
skill_name="pdf-extractor",
script_name="extract",
arguments={"file_path": "document.pdf", "pages": "all"}
)
if result.success:
print(result.stdout)
```

View File

@ -1,46 +0,0 @@
#!/bin/bash
# Convert PDF files to different formats
#
# This script demonstrates shell script support in skillkit.
# It reads JSON from stdin and performs format conversion.
#
# Environment variables available:
#   - SKILL_NAME
#   - SKILL_BASE_DIR
#   - SKILL_VERSION
#   - SKILLKIT_VERSION
# Read JSON input from stdin
read -r json_input
# Parse JSON using Python (for simplicity)
# Missing input_file yields ""; output_format defaults to "txt".
input_file=$(echo "$json_input" | python3 -c "import sys, json; data=json.load(sys.stdin); print(data.get('input_file', ''))")
output_format=$(echo "$json_input" | python3 -c "import sys, json; data=json.load(sys.stdin); print(data.get('output_format', 'txt'))")
# Validate input
# Error is a JSON object on stderr with a non-zero exit, per skill convention.
if [ -z "$input_file" ]; then
echo '{"error": "Missing required argument: input_file"}' >&2
exit 1
fi
# Mock conversion (in real implementation, would use tools like pdftotext, pandoc, etc.)
# Derive the output name by swapping the .pdf suffix for the target format.
output_file="${input_file%.pdf}.${output_format}"
# Output result
# Heredoc keeps the JSON response readable; environment block echoes the
# skillkit-injected variables so callers can verify injection.
cat <<EOF
{
"status": "success",
"input_file": "$input_file",
"output_file": "$output_file",
"output_format": "$output_format",
"message": "Converted $input_file to $output_format format",
"environment": {
"skill_name": "$SKILL_NAME",
"skill_base_dir": "$SKILL_BASE_DIR",
"skill_version": "$SKILL_VERSION",
"skillkit_version": "$SKILLKIT_VERSION"
}
}
EOF
exit 0

View File

@ -1,97 +0,0 @@
#!/usr/bin/env python3
"""Extract text and metadata from PDF files.
This script demonstrates reading JSON arguments from stdin,
processing them, and outputting results in JSON format.
Environment Variables:
- SKILL_NAME: Name of the parent skill
- SKILL_BASE_DIR: Base directory of the skill
- SKILL_VERSION: Version of the skill
- SKILLKIT_VERSION: Version of skillkit
"""
import sys
import json
import os
def extract_pdf(file_path: str, pages: str | list):
"""
Extract text from PDF file (mock implementation).
In a real implementation, this would use a library like PyPDF2 or pdfplumber.
Args:
file_path: Path to the PDF file
pages: "all" or list of page numbers
Returns:
dict with extracted text and metadata
"""
# Mock implementation for demonstration
return {
"text": f"Extracted text from {file_path}",
"metadata": {
"title": "Sample Document",
"author": "skillkit",
"pages": 10,
"file_path": file_path,
"requested_pages": pages
},
"environment": {
"skill_name": os.getenv("SKILL_NAME"),
"skill_base_dir": os.getenv("SKILL_BASE_DIR"),
"skill_version": os.getenv("SKILL_VERSION"),
"skillkit_version": os.getenv("SKILLKIT_VERSION")
}
}
def main():
    """CLI entry point: read JSON args from stdin, extract, print JSON.

    Exit status is 0 on success, 1 on any error; error details are written
    to stderr as a single-line JSON object.
    """
    try:
        args = json.load(sys.stdin)
        if "file_path" not in args:
            raise ValueError("Missing required argument: file_path")
        # "pages" is optional and defaults to extracting everything.
        result = extract_pdf(args["file_path"], args.get("pages", "all"))
        print(json.dumps(result, indent=2))
        sys.exit(0)
    except json.JSONDecodeError as exc:
        # Must be checked before ValueError: JSONDecodeError subclasses it.
        label, err = "Invalid JSON input", exc
    except ValueError as exc:
        label, err = "Invalid arguments", exc
    except Exception as exc:
        label, err = "Unexpected error", exc
    print(json.dumps({"error": label, "details": str(err)}), file=sys.stderr)
    sys.exit(1)
if __name__ == "__main__":
main()

View File

@ -1,94 +0,0 @@
#!/usr/bin/env python3
"""Parse structured data from PDF forms and tables.
This script demonstrates advanced PDF processing capabilities.
"""
import sys
import json
import os
def parse_pdf(file_path: str, extract_tables: bool, extract_forms: bool):
    """Return mock structured data parsed from a PDF.

    Real table extraction would use libraries like tabula-py or camelot,
    and form-field extraction PyPDF2 or pdfplumber; this demo returns
    canned sample data.

    Args:
        file_path: Path to the PDF file.
        extract_tables: Include the mock "tables" section when True.
        extract_forms: Include the mock "forms" section when True.

    Returns:
        dict with "file_path" and an "extracted_data" mapping.
    """
    extracted = {}

    if extract_tables:
        sample_table = {
            "page": 1,
            "rows": 5,
            "columns": 3,
            "data": [
                ["Header1", "Header2", "Header3"],
                ["Row1Col1", "Row1Col2", "Row1Col3"],
                ["Row2Col1", "Row2Col2", "Row2Col3"],
            ],
        }
        extracted["tables"] = [sample_table]

    if extract_forms:
        extracted["forms"] = {
            "name": "John Doe",
            "email": "john@example.com",
            "checkbox_agree": True,
        }

    return {"file_path": file_path, "extracted_data": extracted}
def main():
    """CLI entry point: read JSON args from stdin, parse, print JSON.

    Exits 0 on success; on failure writes a single-line JSON error object
    to stderr and exits 1.
    """
    try:
        args = json.load(sys.stdin)
        target = args.get("file_path")
        if not target:
            raise ValueError("Missing required argument: file_path")
        result = parse_pdf(
            target,
            args.get("extract_tables", False),
            args.get("extract_forms", False),
        )
        print(json.dumps(result, indent=2))
        sys.exit(0)
    except json.JSONDecodeError as exc:
        # Must be checked before ValueError: JSONDecodeError subclasses it.
        label, err = "Invalid JSON input", exc
    except ValueError as exc:
        label, err = "Invalid arguments", exc
    except Exception as exc:
        label, err = "Unexpected error", exc
    print(json.dumps({"error": label, "details": str(err)}), file=sys.stderr)
    sys.exit(1)
if __name__ == "__main__":
main()

View File

@ -1,24 +0,0 @@
---
name: pdf-tools
description: Extract and convert PDF documents using Python scripts
version: 1.0.0
allowed-tools:
- Bash
- Read
- Write
---
# PDF Tools Skill
This skill provides tools for extracting text and metadata from PDF documents and converting them to different formats.
## Available Scripts
### extract.py
see [extract](extract.md)
### convert.sh
see [convert](convert.md)
### parse.py
see [parse](parse.md)

View File

@ -1,11 +0,0 @@
# convert.md
Converts PDF files to different formats (text, markdown, etc.).
**Input**:
```json
{
"input_file": "/path/to/input.pdf",
"output_format": "txt" | "md" | "html"
}
```

View File

@ -1,38 +0,0 @@
# extract.py
Extracts text and metadata from PDF files.
**Input**:
```json
{
"file_path": "/path/to/document.pdf",
"pages": "all" | [1, 2, 3]
}
```
**Output**:
```json
{
"text": "Extracted text content...",
"metadata": {
"title": "Document Title",
"author": "Author Name",
"pages": 10
}
}
```
## Usage Example
```python
from skillkit import SkillManager
manager = SkillManager()
result = manager.execute_skill_script(
skill_name="pdf-extractor",
script_name="extract",
arguments={"file_path": "document.pdf", "pages": "all"}
)
if result.success:
print(result.stdout)
```

View File

@ -1,12 +0,0 @@
# parse.py
Parses structured data from PDF forms and tables.
**Input**:
```json
{
"file_path": "/path/to/form.pdf",
"extract_tables": true,
"extract_forms": true
}
```

View File

@ -1,46 +0,0 @@
#!/bin/bash
# Convert PDF files to different formats
#
# This script demonstrates shell script support in skillkit.
# It reads JSON from stdin and performs format conversion.
#
# Environment variables available:
# - SKILL_NAME
# - SKILL_BASE_DIR
# - SKILL_VERSION
# - SKILLKIT_VERSION

# Read the WHOLE JSON payload from stdin.  The previous `read -r` consumed
# only the first line of input, which silently broke pretty-printed
# (multi-line) JSON documents.
json_input=$(cat)

# Parse JSON using Python (for simplicity).  NOTE(review): invalid JSON will
# surface as a python3 traceback on stderr, matching the original behavior.
input_file=$(printf '%s' "$json_input" | python3 -c "import sys, json; data=json.load(sys.stdin); print(data.get('input_file', ''))")
output_format=$(printf '%s' "$json_input" | python3 -c "import sys, json; data=json.load(sys.stdin); print(data.get('output_format', 'txt'))")

# Validate input: input_file is required.
if [ -z "$input_file" ]; then
    echo '{"error": "Missing required argument: input_file"}' >&2
    exit 1
fi

# Mock conversion (in real implementation, would use tools like pdftotext, pandoc, etc.)
output_file="${input_file%.pdf}.${output_format}"

# Output result as a JSON object on stdout.
cat <<EOF
{
  "status": "success",
  "input_file": "$input_file",
  "output_file": "$output_file",
  "output_format": "$output_format",
  "message": "Converted $input_file to $output_format format",
  "environment": {
    "skill_name": "$SKILL_NAME",
    "skill_base_dir": "$SKILL_BASE_DIR",
    "skill_version": "$SKILL_VERSION",
    "skillkit_version": "$SKILLKIT_VERSION"
  }
}
EOF
exit 0

View File

@ -1,97 +0,0 @@
#!/usr/bin/env python3
"""Extract text and metadata from PDF files.
This script demonstrates reading JSON arguments from stdin,
processing them, and outputting results in JSON format.
Environment Variables:
- SKILL_NAME: Name of the parent skill
- SKILL_BASE_DIR: Base directory of the skill
- SKILL_VERSION: Version of the skill
- SKILLKIT_VERSION: Version of skillkit
"""
import sys
import json
import os
def extract_pdf(file_path: str, pages: str | list):
"""
Extract text from PDF file (mock implementation).
In a real implementation, this would use a library like PyPDF2 or pdfplumber.
Args:
file_path: Path to the PDF file
pages: "all" or list of page numbers
Returns:
dict with extracted text and metadata
"""
# Mock implementation for demonstration
return {
"text": f"Extracted text from {file_path}",
"metadata": {
"title": "Sample Document",
"author": "skillkit",
"pages": 10,
"file_path": file_path,
"requested_pages": pages
},
"environment": {
"skill_name": os.getenv("SKILL_NAME"),
"skill_base_dir": os.getenv("SKILL_BASE_DIR"),
"skill_version": os.getenv("SKILL_VERSION"),
"skillkit_version": os.getenv("SKILLKIT_VERSION")
}
}
def main():
    """CLI entry point: read JSON args from stdin, extract, print JSON.

    Exit status is 0 on success, 1 on any error; error details are written
    to stderr as a single-line JSON object.
    """
    try:
        args = json.load(sys.stdin)
        if "file_path" not in args:
            raise ValueError("Missing required argument: file_path")
        # "pages" is optional and defaults to extracting everything.
        result = extract_pdf(args["file_path"], args.get("pages", "all"))
        print(json.dumps(result, indent=2))
        sys.exit(0)
    except json.JSONDecodeError as exc:
        # Must be checked before ValueError: JSONDecodeError subclasses it.
        label, err = "Invalid JSON input", exc
    except ValueError as exc:
        label, err = "Invalid arguments", exc
    except Exception as exc:
        label, err = "Unexpected error", exc
    print(json.dumps({"error": label, "details": str(err)}), file=sys.stderr)
    sys.exit(1)
if __name__ == "__main__":
main()

View File

@ -1,94 +0,0 @@
#!/usr/bin/env python3
"""Parse structured data from PDF forms and tables.
This script demonstrates advanced PDF processing capabilities.
"""
import sys
import json
import os
def parse_pdf(file_path: str, extract_tables: bool, extract_forms: bool):
    """Return mock structured data parsed from a PDF.

    Real table extraction would use libraries like tabula-py or camelot,
    and form-field extraction PyPDF2 or pdfplumber; this demo returns
    canned sample data.

    Args:
        file_path: Path to the PDF file.
        extract_tables: Include the mock "tables" section when True.
        extract_forms: Include the mock "forms" section when True.

    Returns:
        dict with "file_path" and an "extracted_data" mapping.
    """
    extracted = {}

    if extract_tables:
        sample_table = {
            "page": 1,
            "rows": 5,
            "columns": 3,
            "data": [
                ["Header1", "Header2", "Header3"],
                ["Row1Col1", "Row1Col2", "Row1Col3"],
                ["Row2Col1", "Row2Col2", "Row2Col3"],
            ],
        }
        extracted["tables"] = [sample_table]

    if extract_forms:
        extracted["forms"] = {
            "name": "John Doe",
            "email": "john@example.com",
            "checkbox_agree": True,
        }

    return {"file_path": file_path, "extracted_data": extracted}
def main():
    """CLI entry point: read JSON args from stdin, parse, print JSON.

    Exits 0 on success; on failure writes a single-line JSON error object
    to stderr and exits 1.
    """
    try:
        args = json.load(sys.stdin)
        target = args.get("file_path")
        if not target:
            raise ValueError("Missing required argument: file_path")
        result = parse_pdf(
            target,
            args.get("extract_tables", False),
            args.get("extract_forms", False),
        )
        print(json.dumps(result, indent=2))
        sys.exit(0)
    except json.JSONDecodeError as exc:
        # Must be checked before ValueError: JSONDecodeError subclasses it.
        label, err = "Invalid JSON input", exc
    except ValueError as exc:
        label, err = "Invalid arguments", exc
    except Exception as exc:
        label, err = "Unexpected error", exc
    print(json.dumps({"error": label, "details": str(err)}), file=sys.stderr)
    sys.exit(1)
if __name__ == "__main__":
main()