#!/bin/bash

set -euo pipefail

OLLAMA_API_URL="https://ollama-api.hq.ars9.space/api"
OLLAMA_USER="ollama"
OLLAMA_PASS_PATH="device/god-stronghold/authelia/users/ollama/password"
DEFAULT_MODEL="llama3:8b"
CONTEXT_FILE="${OLLAMA_CONTEXT_FILE:-$HOME/.ollama/context.json}"
PROFILE_DIR="${OLLAMA_PROFILE_DIR:-$HOME/.ollama/profiles}"
OUTPUT_FORMAT="text" # Can be "text" or "json"
SCRIPT_VERSION="1.0.0"

# Check for required dependencies
check_dependencies() {
    local missing=()
    for cmd in pass curl jq column less; do
        if ! command -v "$cmd" &> /dev/null; then
            missing+=("$cmd")
        fi
    done
    if [[ ${#missing[@]} -gt 0 ]]; then
        echo "Error: missing required command(s): ${missing[*]}" >&2
        return 1
    fi
}

check_dependencies

# Ensure ~/.ollama directory exists
ensure_ollama_dir() {
    local dir="$(dirname "$CONTEXT_FILE")"
    if [[ ! -d "$dir" ]]; then
        mkdir -p "$dir"
    fi
}

# Initialize empty context file if it doesn't exist
init_context_file() {
    ensure_ollama_dir
    if [[ ! -f "$CONTEXT_FILE" ]]; then
        echo '[]' > "$CONTEXT_FILE"
    fi
}

# Load system prompt from profile
load_profile_system() {
    local profile_name="$1"
    local profile_file="$PROFILE_DIR/$profile_name.json"
    if [[ -f "$profile_file" ]]; then
        jq -r '.system // ""' "$profile_file"
    fi
}

# Load pre-script from profile
load_profile_pre_script() {
    local profile_name="$1"
    local profile_file="$PROFILE_DIR/$profile_name.json"
    if [[ -f "$profile_file" ]]; then
        jq -r '.pre_script // ""' "$profile_file"
    fi
}

# Load post-script from profile
load_profile_post_script() {
    local profile_name="$1"
    local profile_file="$PROFILE_DIR/$profile_name.json"
    if [[ -f "$profile_file" ]]; then
        jq -r '.post_script // ""' "$profile_file"
    fi
}

# Load model from profile
load_profile_model() {
    local profile_name="$1"
    local profile_file="$PROFILE_DIR/$profile_name.json"
    if [[ -f "$profile_file" ]]; then
        jq -r '.model // ""' "$profile_file"
    fi
}

# Execute a script with input
execute_script() {
    local script="$1"
    local input="$2"
    if [[ -n "$script" ]]; then
        local output
        local exit_code=0
        # Capture output and exit code without tripping `set -e`
        output=$(echo "$input" | bash -c "$script" 2>&1) || exit_code=$?
        if [[ $exit_code -ne 0 ]]; then
            echo "Error: script execution failed with exit code $exit_code" >&2
            echo "Script output: $output" >&2
            return $exit_code
        fi
        echo "$output"
    else
        echo "$input"
    fi
}

# Ensure default profile exists
ensure_default_profile() {
    local default_profile="$PROFILE_DIR/default.json"
    if [[ ! -f "$default_profile" ]]; then
        ensure_ollama_dir
        if [[ ! -d "$PROFILE_DIR" ]]; then
            mkdir -p "$PROFILE_DIR"
        fi
        echo "You are a helpful assistant" | jq -n -R '{system: input}' > "$default_profile"
    fi
}

# Get password from pass
get_password() {
    pass "$OLLAMA_PASS_PATH"
}

# Generate a completion
generate() {
    local prompt=""
    local model="$DEFAULT_MODEL"
    local profile="default"

    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -m|--model)
                model="$2"
                shift 2
                ;;
            -p|--profile)
                profile="$2"
                shift 2
                ;;
            *)
                prompt="$1"
                shift
                ;;
        esac
    done

    # Read from stdin if available and combine with argument prompt
    local stdin_content=""
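    # `-t 0` is true only when stdin is a terminal; when it is not, data is
    # being piped or redirected in, so read it (chat() and embed() below use
    # the same check).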
    if [[ ! -t 0 ]]; then
        stdin_content=$(cat)
    fi
    if [[ -n "$stdin_content" ]] && [[ -n "$prompt" ]]; then
        prompt="$stdin_content"$'\n'"$prompt"
    elif [[ -n "$stdin_content" ]]; then
        prompt="$stdin_content"
    elif [[ -z "$prompt" ]]; then
        echo "Error: prompt required (provide as argument or via stdin)"
        return 1
    fi

    local password
    password=$(get_password)

    # Ensure default profile exists
    ensure_default_profile

    # Load system prompt and scripts from profile
    local system_prompt
    system_prompt=$(load_profile_system "$profile")
    local pre_script
    pre_script=$(load_profile_pre_script "$profile")
    local post_script
    post_script=$(load_profile_post_script "$profile")

    # Apply pre-script if defined
    prompt=$(execute_script "$pre_script" "$prompt")

    # If system prompt provided, use chat API; otherwise use generate API
    local response
    local api_response
    if [[ -n "$system_prompt" ]]; then
        # Build messages array with system prompt
        local messages
        messages=$(echo '[]' | jq --arg content "$system_prompt" '. += [{"role": "system", "content": $content}]')
        messages=$(echo "$messages" | jq --arg content "$prompt" '. += [{"role": "user", "content": $content}]')

        api_response=$(curl -s -u "$OLLAMA_USER:$password" \
            -X POST "$OLLAMA_API_URL/chat" \
            -H 'Content-Type: application/json' \
            -d "{ \"model\": \"$model\", \"messages\": $messages, \"stream\": false }")

        # Check for API errors
        if ! echo "$api_response" | jq . &>/dev/null; then
            echo "Error: Invalid response from API" >&2
            echo "Response: $api_response" >&2
            return 1
        fi
        if echo "$api_response" | jq -e '.error' &>/dev/null; then
            echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
            return 1
        fi

        response=$(echo "$api_response" | jq -r '.message.content // empty')
        if [[ -z "$response" ]]; then
            echo "Error: No response content from API" >&2
            return 1
        fi
    else
        # Use generate API for simple completion
        api_response=$(curl -s -u "$OLLAMA_USER:$password" \
            -X POST "$OLLAMA_API_URL/generate" \
            -H 'Content-Type: application/json' \
            -d "{ \"model\": \"$model\", \"prompt\": $(printf '%s\n' "$prompt" | jq -R .), \"stream\": false }")

        # Check for API errors
        if ! echo "$api_response" | jq . &>/dev/null; then
            echo "Error: Invalid response from API" >&2
            echo "Response: $api_response" >&2
            return 1
        fi
        if echo "$api_response" | jq -e '.error' &>/dev/null; then
            echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
            return 1
        fi

        response=$(echo "$api_response" | jq -r '.response // empty')
        if [[ -z "$response" ]]; then
            echo "Error: No response content from API" >&2
            return 1
        fi
    fi

    # Apply post-script if defined
    if ! response=$(execute_script "$post_script" "$response"); then
        return 1
    fi

    echo "$response"
}
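
# The chat context file is a JSON array of {"role": ..., "content": ...}
# message objects. An illustrative example of its contents:
#
#   [
#     {"role": "user", "content": "Hello!"},
#     {"role": "assistant", "content": "Hi! How can I help?"}
#   ]
#
# chat() appends the user prompt and the assistant reply to this array after
# each successful exchange.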
# Chat with context
chat() {
    local prompt=""
    local model="$DEFAULT_MODEL"
    local reset=false
    local profile="default"

    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -m|--model)
                model="$2"
                shift 2
                ;;
            -c|--context)
                CONTEXT_FILE="$2"
                shift 2
                ;;
            -p|--profile)
                profile="$2"
                shift 2
                ;;
            -r|--reset)
                reset=true
                shift
                ;;
            *)
                prompt="$1"
                shift
                ;;
        esac
    done

    # Read from stdin if available and combine with argument prompt
    local stdin_content=""
    if [[ ! -t 0 ]]; then
        stdin_content=$(cat)
    fi
    if [[ -n "$stdin_content" ]] && [[ -n "$prompt" ]]; then
        prompt="$stdin_content"$'\n'"$prompt"
    elif [[ -n "$stdin_content" ]]; then
        prompt="$stdin_content"
    elif [[ -z "$prompt" ]]; then
        echo "Error: prompt required (provide as argument or via stdin)"
        return 1
    fi

    init_context_file

    # Reset context if requested
    if [[ "$reset" == true ]]; then
        echo '[]' > "$CONTEXT_FILE"
    fi

    # Ensure default profile exists
    ensure_default_profile

    # Load system prompt and scripts from profile
    local system_prompt
    system_prompt=$(load_profile_system "$profile")
    local pre_script
    pre_script=$(load_profile_pre_script "$profile")
    local post_script
    post_script=$(load_profile_post_script "$profile")

    # Apply pre-script if defined
    prompt=$(execute_script "$pre_script" "$prompt")

    # Load existing messages
    local messages
    messages=$(cat "$CONTEXT_FILE")

    # Build messages array with system prompt at the beginning
    if [[ -n "$system_prompt" ]]; then
        messages=$(echo '[]' | jq --arg content "$system_prompt" '. += [{"role": "system", "content": $content}]')
        messages=$(echo "$messages" | jq --argjson context "$(cat "$CONTEXT_FILE")" '. += $context')
    fi

    # Add user message to context
    messages=$(echo "$messages" | jq --arg content "$prompt" '. += [{"role": "user", "content": $content}]')

    local password
    password=$(get_password)

    # Call API and get response
    local api_response
    api_response=$(curl -s -u "$OLLAMA_USER:$password" \
        -X POST "$OLLAMA_API_URL/chat" \
        -H 'Content-Type: application/json' \
        -d "{ \"model\": \"$model\", \"messages\": $messages, \"stream\": false }")

    # Check for API errors
    if ! echo "$api_response" | jq . &>/dev/null; then
        echo "Error: Invalid response from API" >&2
        echo "Response: $api_response" >&2
        return 1
    fi
    if echo "$api_response" | jq -e '.error' &>/dev/null; then
        echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
        return 1
    fi

    # Extract the response
    local reply
    reply=$(echo "$api_response" | jq -r '.message.content // empty')
    if [[ -z "$reply" ]]; then
        echo "Error: No response content from API" >&2
        return 1
    fi

    # Apply post-script if defined
    if ! reply=$(execute_script "$post_script" "$reply"); then
        return 1
    fi

    echo "$reply"

    # Add assistant response to context and save
    local context_messages
    context_messages=$(cat "$CONTEXT_FILE")
    context_messages=$(echo "$context_messages" | jq --arg content "$prompt" '. += [{"role": "user", "content": $content}]')
    context_messages=$(echo "$context_messages" | jq --arg content "$reply" '. += [{"role": "assistant", "content": $content}]')
    echo "$context_messages" > "$CONTEXT_FILE"
}
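
# Note: the /api/embed endpoint returns an "embeddings" array holding one
# vector per input string; embed() sends a single input and therefore reads
# .embeddings[0].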
# Generate embeddings
embed() {
    local input=""
    local model="$DEFAULT_MODEL"
    local profile="default"
    local output_format="$OUTPUT_FORMAT"

    # Parse arguments
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -m|--model)
                model="$2"
                shift 2
                ;;
            -p|--profile)
                profile="$2"
                shift 2
                ;;
            -o|--output)
                output_format="$2"
                shift 2
                ;;
            *)
                input="$1"
                shift
                ;;
        esac
    done

    # Read from stdin if available and combine with argument input
    local stdin_content=""
    if [[ ! -t 0 ]]; then
        stdin_content=$(cat)
    fi
    if [[ -n "$stdin_content" ]] && [[ -n "$input" ]]; then
        input="$stdin_content"$'\n'"$input"
    elif [[ -n "$stdin_content" ]]; then
        input="$stdin_content"
    elif [[ -z "$input" ]]; then
        echo "Error: input required (provide as argument or via stdin)"
        return 1
    fi

    local password
    password=$(get_password)

    # Ensure default profile exists
    ensure_default_profile

    # Load model from profile if not explicitly set
    local profile_model
    profile_model=$(load_profile_model "$profile")
    if [[ -n "$profile_model" ]] && [[ "$model" == "$DEFAULT_MODEL" ]]; then
        model="$profile_model"
    fi

    # Call embedding API
    local api_response
    api_response=$(curl -s -u "$OLLAMA_USER:$password" \
        -X POST "$OLLAMA_API_URL/embed" \
        -H 'Content-Type: application/json' \
        -d "{ \"model\": \"$model\", \"input\": $(printf '%s\n' "$input" | jq -R .), \"stream\": false }")

    # Check for API errors
    if ! echo "$api_response" | jq . &>/dev/null; then
        echo "Error: Invalid response from API" >&2
        echo "Response: $api_response" >&2
        return 1
    fi
    if echo "$api_response" | jq -e '.error' &>/dev/null; then
        echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
        return 1
    fi

    if [[ "$output_format" == "json" ]]; then
        echo "$api_response" | jq '.'
    else
        # Output as formatted text with summary
        local embedding
        embedding=$(echo "$api_response" | jq -r '.embeddings[0] | @json' 2>/dev/null)
        if [[ -z "$embedding" || "$embedding" == "null" ]]; then
            echo "Error: No embedding in response" >&2
            return 1
        fi
        local dimension
        dimension=$(echo "$api_response" | jq '.embeddings[0] | length')

        echo "Embedding Summary"
        echo ""
        local table_data="Dimension|$dimension"
        table_data="$table_data"$'\n'"Model|$model"
        echo -e "$table_data" | column -t -s '|'
        echo ""
        echo "First 10 values:"
        echo "$api_response" | jq '.embeddings[0][0:10]'
    fi
}

# Manage models
model() {
    if [[ $# -eq 0 ]]; then
        echo "Error: model subcommand required (list, get, ps)" >&2
        return 1
    fi

    local subcommand="$1"
    shift
    local output_format="$OUTPUT_FORMAT"
    local password
    password=$(get_password)

    # Parse output format flag
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -o|--output)
                output_format="$2"
                shift 2
                ;;
            *)
                break
                ;;
        esac
    done

    case "$subcommand" in
        list)
            # List all available models
            local api_response
            api_response=$(curl -s -u "$OLLAMA_USER:$password" \
                -X GET "$OLLAMA_API_URL/tags")
            if ! echo "$api_response" | jq . &>/dev/null; then
                echo "Error: Invalid response from API" >&2
                return 1
            fi
            if echo "$api_response" | jq -e '.error' &>/dev/null; then
                echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
                return 1
            fi

            if [[ "$output_format" == "json" ]]; then
                echo "$api_response" | jq '.'
            else
                # Build pipe-separated table data for column formatting
                local table_data="NAME|SIZE|QUANTIZATION"
                table_data="$table_data"$'\n'"$(echo "$api_response" | jq -r '.models[] | "\(.name)|\(.details.parameter_size)|\(.details.quantization_level)"')"
                echo -e "$table_data" | column -t -s '|'
            fi
            ;;
        get)
            if [[ $# -eq 0 ]]; then
                echo "Error: model name required" >&2
                return 1
            fi
            local model_name="$1"
            shift

            # Check for output format flag
            local get_output_format="$output_format"
            while [[ $# -gt 0 ]]; do
                case "$1" in
                    -o|--output)
                        get_output_format="$2"
                        shift 2
                        ;;
                    *)
                        shift
                        ;;
                esac
            done

            # Get detailed model info
            local api_response
            api_response=$(curl -s -u "$OLLAMA_USER:$password" \
                -X POST "$OLLAMA_API_URL/show" \
                -H 'Content-Type: application/json' \
                -d "{\"name\": \"$model_name\"}")
            if ! echo "$api_response" | jq . &>/dev/null; then
                echo "Error: Invalid response from API" >&2
                return 1
            fi
            if echo "$api_response" | jq -e '.error' &>/dev/null; then
                echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
                return 1
            fi

            if [[ "$get_output_format" == "json" ]]; then
                echo "$api_response" | jq '.'
            else
                # Output as formatted table with column
                local table_data="Model|$model_name"
                local param_size
                param_size=$(echo "$api_response" | jq -r '.details.parameter_size // "-"')
                table_data="$table_data"$'\n'"Parameter Size|$param_size"
                local quantization
                quantization=$(echo "$api_response" | jq -r '.details.quantization_level // "-"')
                table_data="$table_data"$'\n'"Quantization|$quantization"
                local format
                format=$(echo "$api_response" | jq -r '.details.format // "-"')
                table_data="$table_data"$'\n'"Format|$format"
                local family
                family=$(echo "$api_response" | jq -r '.details.family // "-"')
                table_data="$table_data"$'\n'"Family|$family"
                echo -e "$table_data" | column -t -s '|'
            fi
            ;;
        ps)
            # List running models
            local api_response
            api_response=$(curl -s -u "$OLLAMA_USER:$password" \
                -X GET "$OLLAMA_API_URL/ps")
            if ! echo "$api_response" | jq . &>/dev/null; then
                echo "Error: Invalid response from API" >&2
                return 1
            fi
            if echo "$api_response" | jq -e '.error' &>/dev/null; then
                echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
                return 1
            fi

            # Format running models as table
            local table_data="NAME|SIZE|VRAM"
            if echo "$api_response" | jq -e '.models[] | select(.name)' &>/dev/null; then
                table_data="$table_data"$'\n'"$(echo "$api_response" | jq -r '.models[] | "\(.name)|\(.size)|\(.size_vram)"')"
            fi

            if [[ "$output_format" == "json" ]]; then
                echo "$api_response" | jq '.'
            else
                echo -e "$table_data" | column -t -s '|'
            fi
            ;;
        *)
            echo "Error: unknown model subcommand '$subcommand'" >&2
            echo "Available subcommands: list, get, ps" >&2
            return 1
            ;;
    esac
}
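
# Each profile lives in $PROFILE_DIR/<name>.json; every field is optional.
# An illustrative example, using values from the help text below:
#
#   {
#     "system": "You are a helpful assistant",
#     "pre_script": "tr '[:lower:]' '[:upper:]'",
#     "post_script": "jq .",
#     "model": "deepseek-r1:8b"
#   }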
# Manage profiles
profile() {
    if [[ $# -eq 0 ]]; then
        echo "Error: profile subcommand required (list, add, update, delete)"
        return 1
    fi

    local subcommand="$1"
    shift
    local output_format="$OUTPUT_FORMAT"

    # Parse output format flag
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -o|--output)
                output_format="$2"
                shift 2
                ;;
            *)
                break
                ;;
        esac
    done

    case "$subcommand" in
        list)
            if [[ ! -d "$PROFILE_DIR" ]]; then
                return 0
            fi
            if [[ ! "$(ls -A "$PROFILE_DIR"/*.json 2>/dev/null)" ]]; then
                return 0
            fi

            # Build pipe-separated table data and use column for formatting
            local table_data="NAME|SYSTEM PROMPT|MODEL|SCRIPTS"
            for file in "$PROFILE_DIR"/*.json; do
                local name
                name=$(basename "$file" .json)
                local system
                system=$(jq -r '.system // ""' "$file" | cut -c1-40)
                local model
                model=$(jq -r '.model // ""' "$file")
                local pre_script post_script scripts_str
                pre_script=$(jq -r '.pre_script // ""' "$file")
                post_script=$(jq -r '.post_script // ""' "$file")
                scripts_str=""
                [[ -n "$pre_script" ]] && scripts_str="pre"
                [[ -n "$post_script" ]] && scripts_str="${scripts_str:+$scripts_str,}post"
                table_data="$table_data"$'\n'"$name|$system|$model|$scripts_str"
            done

            if [[ "$output_format" == "json" ]]; then
                # Output as JSON array of profiles
                local json_output="[]"
                for file in "$PROFILE_DIR"/*.json; do
                    local profile_name
                    profile_name=$(basename "$file" .json)
                    local profile_data
                    profile_data=$(cat "$file" | jq --arg name "$profile_name" '. + {name: $name}')
                    json_output=$(echo "$json_output" | jq --argjson profile "$profile_data" '. += [$profile]')
                done
                echo "$json_output" | jq '.'
            else
                # Use column for clean table formatting
                echo -e "$table_data" | column -t -s '|'
            fi
            ;;
        get)
            if [[ $# -eq 0 ]]; then
                echo "Error: profile name required" >&2
                return 1
            fi
            local name="$1"
            shift

            # Check for output format flag
            local get_output_format="$output_format"
            while [[ $# -gt 0 ]]; do
                case "$1" in
                    -o|--output)
                        get_output_format="$2"
                        shift 2
                        ;;
                    *)
                        shift
                        ;;
                esac
            done

            local profile_file="$PROFILE_DIR/$name.json"
            if [[ ! -f "$profile_file" ]]; then
                echo "Error: profile '$name' not found" >&2
                return 1
            fi

            if [[ "$get_output_format" == "json" ]]; then
                # Output as JSON with profile name
                cat "$profile_file" | jq --arg name "$name" '. + {name: $name}'
            else
                # Output as formatted table with column
                local table_data="Name|$name"
                local system_prompt
                system_prompt=$(jq -r '.system // ""' "$profile_file")
                if [[ -n "$system_prompt" ]]; then
                    # Truncate to 60 chars
                    table_data="$table_data"$'\n'"System Prompt|$(echo "$system_prompt" | cut -c1-60)..."
                fi
                local pre_script
                pre_script=$(jq -r '.pre_script // ""' "$profile_file")
                if [[ -n "$pre_script" ]]; then
                    table_data="$table_data"$'\n'"Pre-script|$(echo "$pre_script" | cut -c1-60)..."
                fi
                local post_script
                post_script=$(jq -r '.post_script // ""' "$profile_file")
                if [[ -n "$post_script" ]]; then
                    table_data="$table_data"$'\n'"Post-script|$(echo "$post_script" | cut -c1-60)..."
                fi
                local profile_model
                profile_model=$(jq -r '.model // ""' "$profile_file")
                if [[ -n "$profile_model" ]]; then
                    table_data="$table_data"$'\n'"Model|$profile_model"
                fi
                echo -e "$table_data" | column -t -s '|'
            fi
            ;;
        add)
            if [[ $# -eq 0 ]]; then
                echo "Error: profile name required"
                return 1
            fi
            local name="$1"
            shift
            local system_prompt=""
            local pre_script=""
            local post_script=""
            local profile_model=""

            while [[ $# -gt 0 ]]; do
                case "$1" in
                    --pre-script)
                        pre_script="$2"
                        shift 2
                        ;;
                    --post-script)
                        post_script="$2"
                        shift 2
                        ;;
                    -m|--model)
                        profile_model="$2"
                        shift 2
                        ;;
                    *)
                        if [[ -z "$system_prompt" ]]; then
                            system_prompt="$1"
                        fi
                        shift
                        ;;
                esac
            done

            if [[ -z "$system_prompt" ]] && [[ ! -t 0 ]]; then
                system_prompt=$(cat)
            fi
            if [[ -z "$system_prompt" ]]; then
                echo "Error: system prompt required"
                return 1
            fi

            ensure_ollama_dir
            if [[ ! -d "$PROFILE_DIR" ]]; then
                mkdir -p "$PROFILE_DIR"
            fi

            # Build JSON object with system prompt and optional scripts/model
            # (--arg preserves multi-line system prompts read from stdin)
            local profile_json
            profile_json=$(jq -n --arg system "$system_prompt" '{system: $system}')
            if [[ -n "$pre_script" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg pre "$pre_script" '.pre_script = $pre')
            fi
            if [[ -n "$post_script" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg post "$post_script" '.post_script = $post')
            fi
            if [[ -n "$profile_model" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg model "$profile_model" '.model = $model')
            fi

            echo "$profile_json" > "$PROFILE_DIR/$name.json"
            echo "Profile '$name' created"
            ;;
        update)
            if [[ $# -eq 0 ]]; then
                echo "Error: profile name required"
                return 1
            fi
            local name="$1"
            shift
            local profile_file="$PROFILE_DIR/$name.json"
            if [[ ! -f "$profile_file" ]]; then
                echo "Error: profile '$name' not found"
                return 1
            fi

            local system_prompt=""
            local pre_script=""
            local post_script=""
            local profile_model=""

            while [[ $# -gt 0 ]]; do
                case "$1" in
                    --pre-script)
                        pre_script="$2"
                        shift 2
                        ;;
                    --post-script)
                        post_script="$2"
                        shift 2
                        ;;
                    -m|--model)
                        profile_model="$2"
                        shift 2
                        ;;
                    *)
                        if [[ -z "$system_prompt" ]]; then
                            system_prompt="$1"
                        fi
                        shift
                        ;;
                esac
            done

            if [[ -z "$system_prompt" ]] && [[ ! -t 0 ]]; then
                system_prompt=$(cat)
            fi

            # Require at least one field to update
            if [[ -z "$system_prompt" ]] && [[ -z "$pre_script" ]] && [[ -z "$post_script" ]] && [[ -z "$profile_model" ]]; then
                echo "Error: system prompt or script/model options required"
                return 1
            fi

            # Load current profile
            local profile_json
            profile_json=$(cat "$profile_file")

            # Update fields if provided
            if [[ -n "$system_prompt" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg system "$system_prompt" '.system = $system')
            fi
            if [[ -n "$pre_script" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg pre "$pre_script" '.pre_script = $pre')
            fi
            if [[ -n "$post_script" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg post "$post_script" '.post_script = $post')
            fi
            if [[ -n "$profile_model" ]]; then
                profile_json=$(echo "$profile_json" | jq --arg model "$profile_model" '.model = $model')
            fi

            echo "$profile_json" > "$profile_file"
            echo "Profile '$name' updated"
            ;;
        delete)
            if [[ $# -eq 0 ]]; then
                echo "Error: profile name required"
                return 1
            fi
            local name="$1"
            local profile_file="$PROFILE_DIR/$name.json"
            if [[ ! -f "$profile_file" ]]; then
                echo "Error: profile '$name' not found"
                return 1
            fi
            rm "$profile_file"
            echo "Profile '$name' deleted"
            ;;
        *)
            echo "Error: unknown profile subcommand '$subcommand'" >&2
            echo "Available subcommands: list, get, add, update, delete" >&2
            return 1
            ;;
    esac
}

# Show general help
show_help() {
    cat << 'EOF'
Usage: ollama <command> [options]

Commands:
  generate [options]    Generate a response from a prompt (supports stdin)
  chat [options]        Chat with context memory (supports stdin)
  embed [options]       Generate embeddings for text (supports stdin)
  model                 Manage models
  profile               Manage profiles
  version               Show script and Ollama server version
  help [command]        Show this help message or help for a specific command

For detailed help on a command, run: ollama help <command>

Examples:
  ollama help generate
  ollama help chat
  ollama help embed
  ollama help profile
  ollama help model

Environment Variables:
  OLLAMA_CONTEXT_FILE    Override default context file location
  OLLAMA_PROFILE_DIR     Override default profile directory location
EOF
}

help_generate() {
    cat << 'EOF'
Usage: ollama generate <prompt> [options]

Generate a single response from a prompt. Supports reading from stdin.

Options:
  -m, --model MODEL      Use a specific model (default: llama3:8b)
  -p, --profile NAME     Use a profile's system prompt (default: default)

Examples:
  ollama generate "What is 2+2?"
  ollama generate "Write a poem" -m deepseek-r1:8b
  echo "Tell me a joke" | ollama generate
  echo "How do I read a file?" | ollama generate -p python
  ollama generate -p json "List colors"
EOF
}

help_chat() {
    cat << 'EOF'
Usage: ollama chat <prompt> [options]

Chat with context memory. Maintains conversation history between invocations.

Options:
  -m, --model MODEL      Use a specific model (default: llama3:8b)
  -c, --context FILE     Use a specific context file (default: ~/.ollama/context.json)
  -p, --profile NAME     Use a profile's system prompt (default: default)
  -r, --reset            Clear the context and start fresh

Examples:
  ollama chat
  echo "Hello!" | ollama chat
  ollama chat -m deepseek-r1:14b
  ollama chat -r                                        # Reset context
  ollama chat -c /tmp/custom_context.json               # Use custom context file
  OLLAMA_CONTEXT_FILE=/tmp/session.json ollama chat     # Use env variable
  ollama chat -p default
  echo "What is Docker?" | ollama chat -p json
  echo "How do I write a loop?" | ollama chat -p python
EOF
}
help_profile() {
    cat << 'EOF'
Usage: ollama profile <subcommand> [options]

Manage profiles for system prompts, pre/post scripts, and model selection.

Subcommands:
  list                              List all profile names
  get NAME                          Show profile details
  add NAME PROMPT [OPTIONS]         Create a new profile
  update NAME [PROMPT] [OPTIONS]    Update a profile's system prompt or scripts
  delete NAME                       Delete a profile

Profile Options:
  --pre-script SCRIPT     Script to execute on input before sending to ollama
  --post-script SCRIPT    Script to execute on output after receiving from ollama
  -m, --model MODEL       Model to use for this profile

Examples:
  ollama profile list                                    # List all profiles
  ollama profile get json                                # Show profile details
  ollama profile add work "You are a professional..."    # Create profile
  ollama profile add uppercase "Respond in all caps" --pre-script "tr '[:lower:]' '[:upper:]'"
  ollama profile add pretty "Return formatted JSON" --post-script "jq ."
  ollama profile add deep "You are a thinking model" -m deepseek-r1:8b
  ollama profile update python "You are a Python expert..."    # Update profile
  ollama profile update json -m gemma:7b-text                  # Change model
  ollama profile delete work                                   # Delete profile
EOF
}

help_model() {
    cat << 'EOF'
Usage: ollama model <subcommand>

Manage and query available models.

Subcommands:
  list         List all available models with specs
  get MODEL    Show detailed model information
  ps           List currently running/loaded models

Examples:
  ollama model list
  ollama model get llama3:8b
  ollama model get deepseek-r1:8b
  ollama model ps
EOF
}

help_embed() {
    cat << 'EOF'
Usage: ollama embed <input> [options]

Generate embeddings for text using a specified model. Supports reading from stdin.

Options:
  -m, --model MODEL      Use a specific model (default: llama3:8b)
  -p, --profile NAME     Use a profile's model selection (default: default)
  -o, --output FORMAT    Output format: text (default) or json

Output Formats:
  text    Display embedding summary with first 10 values
  json    Display full API response including all embedding values

Examples:
  ollama embed "What is machine learning?"
  ollama embed "Hello world" -m nomic-embed-text
  echo "Some text to embed" | ollama embed
  echo "Multiple lines of text" | ollama embed -o json
  ollama embed "Search query" -m nomic-embed-text -o json | jq '.embeddings[0] | length'
EOF
}

help_version() {
    cat << 'EOF'
Usage: ollama version [options]

Display the CLI script version and the Ollama server version.

Options:
  -o, --output FORMAT    Output format: text (default) or json

Examples:
  ollama version
  ollama version -o json
  ollama version -o json | jq '.ollama_version'
EOF
}

# Get version information
version() {
    local output_format="$OUTPUT_FORMAT"

    # Parse output format flag
    while [[ $# -gt 0 ]]; do
        case "$1" in
            -o|--output)
                output_format="$2"
                shift 2
                ;;
            *)
                shift
                ;;
        esac
    done

    local password
    password=$(get_password)

    # Get Ollama server version
    local api_response
    api_response=$(curl -s -u "$OLLAMA_USER:$password" \
        -X GET "$OLLAMA_API_URL/version")
    if ! echo "$api_response" | jq . &>/dev/null; then
        echo "Error: Invalid response from API" >&2
        return 1
    fi
    if echo "$api_response" | jq -e '.error' &>/dev/null; then
        echo "Error: $(echo "$api_response" | jq -r '.error')" >&2
        return 1
    fi

    local ollama_version
    ollama_version=$(echo "$api_response" | jq -r '.version // "unknown"')

    if [[ "$output_format" == "json" ]]; then
        echo "$api_response" | jq --arg cli_version "$SCRIPT_VERSION" '{cli_version: $cli_version, ollama_version: .version}'
    else
        echo "ollama CLI version $SCRIPT_VERSION"
        echo ""
        echo "Ollama version $ollama_version"
    fi
}

# Main
main() {
    local command="${1:-help}"

    case "$command" in
        generate)
            generate "${@:2}"
            ;;
        embed)
            embed "${@:2}"
            ;;
        model|models)
            model "${@:2}"
            ;;
        chat)
            chat "${@:2}"
            ;;
        profile)
            profile "${@:2}"
            ;;
        version)
            version "${@:2}"
            ;;
        help|--help|-h)
            if [[ $# -gt 1 ]]; then
                local help_topic="$2"
                case "$help_topic" in
                    generate) help_generate ;;
                    embed) help_embed ;;
                    chat) help_chat ;;
                    profile) help_profile ;;
                    model|models) help_model ;;
                    version) help_version ;;
                    *)
                        echo "Error: unknown command '$help_topic'" >&2
                        return 1
                        ;;
                esac
            else
                show_help
            fi
            ;;
        *)
            echo "Error: unknown command '$command'" >&2
            echo "Run 'ollama help' for usage information" >&2
            return 1
            ;;
    esac
}

main "$@"