Files
sf-cli-wrapper/test-wrapper-suite.sh

210 lines
8.3 KiB
Bash
Executable File

#!/bin/bash
set -euo pipefail
# Comprehensive Test Suite for SF CLI Wrapper Scripts
# Tests 100% coverage of all critical functionality using PWC-TEAM-DEV

# Org every wrapper invocation in this suite targets.
readonly TEST_ORG="PWC-TEAM-DEV"

# ANSI color codes (literal '\033' sequences, expanded by printf %b / echo -e).
readonly GREEN='\033[0;32m'
readonly RED='\033[0;31m'
readonly YELLOW='\033[0;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly NC='\033[0m' # No Color

# Mutable test counters plus the names of any failed tests.
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
FAILED_LIST=()

# Per-run artifacts (captured command output, exports) live under $TEST_DIR;
# each run gets its own timestamped log file.
TEST_DIR="test-results"
mkdir -p "$TEST_DIR"
LOG_FILE="$TEST_DIR/test-$(date +%Y%m%d_%H%M%S).log"

# Banner — the first tee (no -a) truncates the log, the rest append.
printf '%b\n' "${BLUE}SF CLI Wrapper Comprehensive Test Suite${NC}" | tee "$LOG_FILE"
printf '%b\n' "${BLUE}=======================================${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${CYAN}Target Org: $TEST_ORG${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${CYAN}Log File: $LOG_FILE${NC}" | tee -a "$LOG_FILE"
printf '\n' | tee -a "$LOG_FILE"
# Helper function to run tests
#
# Runs one test case, updates the global counters, and logs PASS/FAIL.
# Globals:   TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS, FAILED_LIST (written)
#            TEST_DIR, LOG_FILE, GREEN, RED, NC (read)
# Arguments: $1 - test name (spaces become underscores in the output filename)
#            $2 - command string, executed via eval
#            $3 - expected exit code (default 0)
#            $4 - optional description printed when the test fails
# Outputs:   one "Testing: ..." line per call to stdout and $LOG_FILE;
#            the command's stdout+stderr are captured in $TEST_DIR/<name>.out
run_test() {
local test_name="$1"
local test_command="$2"
local expected_exit_code="${3:-0}"
local description="${4:-}"
# BUG FIX: ((VAR++)) returns status 1 when VAR is 0, so under `set -e` the
# first call aborted the whole suite. Plain arithmetic assignment is safe.
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing: $test_name ... " | tee -a "$LOG_FILE"
local output_file="$TEST_DIR/${test_name// /_}.out"
local exit_code
# Run the command inside an `if` so a non-zero status does not trip set -e;
# capture the real exit code from the else branch.
if eval "$test_command" > "$output_file" 2>&1; then
exit_code=0
else
exit_code=$?
fi
if [[ $exit_code -eq $expected_exit_code ]]; then
echo -e "${GREEN}PASS${NC}" | tee -a "$LOG_FILE"
PASSED_TESTS=$((PASSED_TESTS + 1))
else
echo -e "${RED}FAIL${NC} (expected: $expected_exit_code, got: $exit_code)" | tee -a "$LOG_FILE"
FAILED_TESTS=$((FAILED_TESTS + 1))
FAILED_LIST+=("$test_name")
if [[ -n "$description" ]]; then
echo " $description" | tee -a "$LOG_FILE"
fi
echo " Output in: $output_file" | tee -a "$LOG_FILE"
fi
}
# Test Categories
# Every wrapper script must answer both the two-character (-hp) and the long
# (--help) help flag with exit code 0.
printf '%b\n' "${BLUE}=== Testing Help Functions (100% Coverage) ===${NC}" | tee -a "$LOG_FILE"
wrapper_scripts=(
sf-check sf-deploy sf-dry-run sf-web-open sf-org-create sf-org-info
sf-retrieve sf-test-run sf-apex-run sf-data-export sf-data-import sf-logs-tail
)
for wrapper in "${wrapper_scripts[@]}"; do
run_test "$wrapper help -hp" "./$wrapper -hp" 0 "Two-character help option"
run_test "$wrapper help --help" "./$wrapper --help" 0 "Long form help option"
done
echo "" | tee -a "$LOG_FILE"
echo -e "${BLUE}=== Testing Two-Character Option Recognition ===${NC}" | tee -a "$LOG_FILE"
# Core deployment and validation scripts
# Exit 1 expected: -to must be parsed, then the script should fail because no
# deployment source was supplied.
run_test "sf-deploy -to option" "./sf-deploy -to $TEST_ORG" 1 "Should fail on missing source but recognize -to"
run_test "sf-dry-run -to option" "./sf-dry-run -to $TEST_ORG" 1 "Should fail on missing source but recognize -to"
# Web access
# -ur (URL-only) keeps the test from actually launching a browser.
run_test "sf-web-open -to -ur" "./sf-web-open -to $TEST_ORG -ur" 0 "URL-only mode with target org"
# Org management
run_test "sf-org-create -al option" "./sf-org-create -al TestOrg" 1 "Should fail on other validation but recognize -al"
run_test "sf-org-info -to option" "./sf-org-info -to $TEST_ORG" 0 "Should work with valid org"
# Data operations
# NOTE(review): these run live queries against $TEST_ORG and need an
# authenticated org; exported files land in $TEST_DIR.
run_test "sf-data-export -qy -to" "./sf-data-export -qy 'SELECT Id FROM User LIMIT 1' -to $TEST_ORG -fm csv -ot $TEST_DIR/test_export.csv" 0 "Basic data export"
run_test "sf-data-export -so option" "./sf-data-export -so User -to $TEST_ORG -fm json -ot $TEST_DIR/users.json" 0 "SObject export"
# Metadata operations
run_test "sf-retrieve -to -tp" "./sf-retrieve -to $TEST_ORG -tp ApexClass -dr $TEST_DIR/retrieved" 0 "Metadata retrieval"
# Logs
run_test "sf-logs-tail -hp recognition" "./sf-logs-tail -hp" 0 "Should show help with new options"
echo "" | tee -a "$LOG_FILE"
echo -e "${BLUE}=== Testing Error Conditions ===${NC}" | tee -a "$LOG_FILE"
# Missing required parameters
# All three should exit 1 with a usage/validation error instead of running.
run_test "sf-deploy no args" "./sf-deploy" 1 "Should fail with no arguments"
run_test "sf-data-export no query" "./sf-data-export -to $TEST_ORG" 1 "Should fail without query or sobject"
run_test "sf-org-create no alias" "./sf-org-create" 1 "Should fail without alias"
# Invalid options
run_test "sf-deploy invalid option" "./sf-deploy -invalid" 1 "Should reject unknown options"
run_test "sf-web-open invalid option" "./sf-web-open -xyz" 1 "Should reject unknown options"
# Conflicting options
# NOTE(review): -sr and -dr are assumed to be mutually exclusive source
# selectors — confirm against sf-deploy's own help text.
run_test "sf-deploy conflicting options" "./sf-deploy -to $TEST_ORG -sr file1 -dr dir1" 1 "Should reject conflicting source options"
echo "" | tee -a "$LOG_FILE"
echo -e "${BLUE}=== Testing Core Functionality ===${NC}" | tee -a "$LOG_FILE"
# Environment check
run_test "sf-check basic" "./sf-check" 0 "Basic environment check"
run_test "sf-check verbose" "./sf-check -ve" 0 "Verbose environment check"
# Org operations
run_test "sf-org-info list" "./sf-org-info -ls" 0 "List authenticated orgs"
# Create test files for advanced testing
# NOTE(review): this CSV fixture is never referenced again in this script —
# presumably intended for an sf-data-import test; confirm or remove.
echo "FirstName,LastName,Email" > "$TEST_DIR/test-contacts.csv"
echo "TestUser,One,test1@example.com" >> "$TEST_DIR/test-contacts.csv"
# Test file-based operations
# Quoted 'EOF' delimiter: the Apex snippet is written verbatim — no shell
# expansion of $-signs or quotes inside the heredoc.
cat > "$TEST_DIR/test.apex" << 'EOF'
System.debug('Test execution from file');
System.debug('Current user: ' + UserInfo.getName());
EOF
run_test "sf-apex-run file" "./sf-apex-run -fl $TEST_DIR/test.apex -to $TEST_ORG" 0 "Execute Apex from file"
run_test "sf-apex-run inline" "./sf-apex-run --code \"System.debug('Inline test');\" -to $TEST_ORG" 0 "Execute inline Apex"
echo "" | tee -a "$LOG_FILE"
echo -e "${BLUE}=== Testing Advanced Features ===${NC}" | tee -a "$LOG_FILE"
# Test bulk vs regular data operations
# -bk is assumed to switch the export onto the Bulk API code path — TODO confirm.
run_test "sf-data-export bulk" "./sf-data-export -qy 'SELECT Id FROM Account LIMIT 5' -to $TEST_ORG -bk -ot $TEST_DIR/bulk_export.csv" 0 "Bulk API export"
# Test different formats
run_test "sf-data-export JSON" "./sf-data-export -so Contact -to $TEST_ORG -fm json -ot $TEST_DIR/contacts.json" 0 "JSON format export"
# Test retrieval with different options
# A comma-separated -tp value exercises multi-type retrieval in a single call.
run_test "sf-retrieve multiple types" "./sf-retrieve -to $TEST_ORG -tp 'ApexClass,CustomObject' -dr $TEST_DIR/multi_retrieve" 0 "Multiple metadata types"
echo "" | tee -a "$LOG_FILE"
echo -e "${BLUE}=== Testing Backwards Compatibility ===${NC}" | tee -a "$LOG_FILE"
# Test that long options still work
# Exit 1 expected: --directory points at a nonexistent path, which proves the
# long options were parsed before validation failed.
run_test "sf-deploy --target-org" "./sf-deploy --target-org $TEST_ORG --directory /nonexistent" 1 "Long options should work"
run_test "sf-web-open long opts" "./sf-web-open --target-org $TEST_ORG --url-only" 0 "Long options for web-open"
echo "" | tee -a "$LOG_FILE"
echo -e "${BLUE}=== Performance & Stress Tests ===${NC}" | tee -a "$LOG_FILE"
# Quick performance test
# NOTE(review): this times the whole run_test call (eval plus tee logging),
# not just sf-check, and `date +%s` only gives whole-second resolution.
start_time=$(date +%s)
run_test "sf-check performance" "./sf-check" 0 "Performance check"
end_time=$(date +%s)
duration=$((end_time - start_time))
echo " sf-check completed in ${duration}s" | tee -a "$LOG_FILE"
# Test concurrent help requests (safety check)
# NOTE(review): a bare `wait` exits 0 regardless of the background jobs'
# statuses, so this only proves both invocations complete, not that they pass.
run_test "concurrent help" "./sf-deploy -hp & ./sf-web-open -hp & wait" 0 "Concurrent help requests"
echo "" | tee -a "$LOG_FILE"
# ---------------------------------------------------------------------------
# Final summary: print the totals and success rate, list any failed tests,
# and exit 0 only when every test passed.
# ---------------------------------------------------------------------------
printf '%b\n' "${BLUE}=== Test Results Summary ===${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${BLUE}===========================${NC}" | tee -a "$LOG_FILE"
printf '%s\n' "Total Tests: $TOTAL_TESTS" | tee -a "$LOG_FILE"
printf '%b\n' "${GREEN}Passed: $PASSED_TESTS${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${RED}Failed: $FAILED_TESTS${NC}" | tee -a "$LOG_FILE"
# Integer percentage; the guard keeps an empty run from dividing by zero.
if (( TOTAL_TESTS > 0 )); then
success_rate=$(( PASSED_TESTS * 100 / TOTAL_TESTS ))
printf '%s\n' "Success Rate: ${success_rate}%" | tee -a "$LOG_FILE"
fi
# Detail section only appears when something failed.
if (( FAILED_TESTS > 0 )); then
printf '\n' | tee -a "$LOG_FILE"
printf '%b\n' "${RED}Failed Tests:${NC}" | tee -a "$LOG_FILE"
for name in "${FAILED_LIST[@]}"; do
printf '%b\n' "${RED}${name}${NC}" | tee -a "$LOG_FILE"
done
printf '\n' | tee -a "$LOG_FILE"
printf '%b\n' "${YELLOW}📁 Check individual test outputs in: $TEST_DIR/${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${YELLOW}📋 Full log available at: $LOG_FILE${NC}" | tee -a "$LOG_FILE"
fi
printf '\n' | tee -a "$LOG_FILE"
# Exit status mirrors the suite outcome: non-zero when any test failed.
if (( FAILED_TESTS > 0 )); then
printf '%b\n' "${RED}❌ Some tests failed${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${YELLOW}🔧 Please review the failed tests and fix any issues${NC}" | tee -a "$LOG_FILE"
exit 1
else
printf '%b\n' "${GREEN}🎉 ALL TESTS PASSED!${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${GREEN}✅ 100% test coverage achieved${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${GREEN}✅ All wrapper scripts are working correctly with PWC-TEAM-DEV${NC}" | tee -a "$LOG_FILE"
printf '%b\n' "${CYAN}🚀 Ready for production use!${NC}" | tee -a "$LOG_FILE"
exit 0
fi