Mirror of https://github.com/K-Dense-AI/claude-scientific-skills.git
synced 2026-01-26 16:58:56 +08:00
Replace all instances of Sonnet with Opus

@@ -139,7 +139,7 @@ client = OpenAI(
 
 md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5", # recommended for scientific vision
+    llm_model="anthropic/claude-opus-4.5", # recommended for scientific vision
     llm_prompt="Describe this image in detail for scientific documentation"
 )
 
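
For reference, the configuration these hunks update looks roughly like this when assembled end to end. This is an illustrative sketch, not part of the commit: the OpenRouter base_url, the placeholder API key, and the input file name are assumptions; the MarkItDown parameters match the parameter table further down.

```python
from openai import OpenAI
from markitdown import MarkItDown

# OpenAI-compatible client pointed at OpenRouter (base_url and key are placeholders).
client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key="sk-or-v1-...",
)

md = MarkItDown(
    llm_client=client,
    llm_model="anthropic/claude-opus-4.5",  # the model this commit switches to
    llm_prompt="Describe this image in detail for scientific documentation",
)

# Embedded or standalone images are described by the configured model.
result = md.convert("figure.png")  # illustrative file name
print(result.text_content)
```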

@@ -262,7 +262,7 @@ client = OpenAI(
 
 md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5", # recommended for presentations
+    llm_model="anthropic/claude-opus-4.5", # recommended for presentations
     llm_prompt="Describe this slide image in detail, focusing on key visual elements and data"
 )
 

@@ -419,7 +419,7 @@ client = OpenAI(
 
 md_ai = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5",
+    llm_model="anthropic/claude-opus-4.5",
     llm_prompt="Describe scientific figures with technical precision"
 )
 ```

@@ -101,7 +101,7 @@ from openai import OpenAI
 client = OpenAI()
 md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5",
+    llm_model="anthropic/claude-opus-4.5",
     llm_prompt="Describe this scientific slide, focusing on data and key findings"
 )
 

@@ -143,7 +143,7 @@ Be technical and precise.
 
 md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5", # recommended for scientific vision
+    llm_model="anthropic/claude-opus-4.5", # recommended for scientific vision
     llm_prompt=scientific_prompt
 )
 

@@ -167,14 +167,14 @@ client = OpenAI(
 # Scientific papers - use Claude for technical analysis
 scientific_md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5",
+    llm_model="anthropic/claude-opus-4.5",
     llm_prompt="Describe scientific figures with technical precision"
 )
 
 # Presentations - use GPT-4o for visual understanding
 presentation_md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5",
+    llm_model="anthropic/claude-opus-4.5",
     llm_prompt="Summarize slide content and key visual elements"
 )
 

@@ -23,7 +23,7 @@ md = MarkItDown(
 | Parameter | Type | Default | Description |
 |-----------|------|---------|-------------|
 | `llm_client` | OpenAI client | `None` | OpenAI-compatible client for AI image descriptions |
-| `llm_model` | str | `None` | Model name (e.g., "anthropic/claude-sonnet-4.5") for image descriptions |
+| `llm_model` | str | `None` | Model name (e.g., "anthropic/claude-opus-4.5") for image descriptions |
 | `llm_prompt` | str | `None` | Custom prompt for image description |
 | `docintel_endpoint` | str | `None` | Azure Document Intelligence endpoint |
 | `enable_plugins` | bool | `False` | Enable 3rd-party plugins |
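
The table above is the full constructor surface; the commit only touches the example model string. As a hedged sketch of the non-LLM options for contrast (the Azure endpoint is a placeholder):

```python
from markitdown import MarkItDown

# Default converter: no AI image descriptions, third-party plugins disabled.
md_plain = MarkItDown(enable_plugins=False)

# Variant routed through Azure Document Intelligence (placeholder endpoint).
md_docintel = MarkItDown(
    docintel_endpoint="https://<your-resource>.cognitiveservices.azure.com/"
)

result = md_plain.convert("paper.pdf")  # illustrative input
print(result.text_content[:500])
```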

@@ -235,7 +235,7 @@ client = OpenAI(
 # Create MarkItDown with AI support
 md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5", # recommended for scientific vision
+    llm_model="anthropic/claude-opus-4.5", # recommended for scientific vision
     llm_prompt="Describe this image in detail for scientific documentation"
 )
 

@@ -246,11 +246,8 @@ result = md.convert("presentation.pptx")
 ### Available Models via OpenRouter
 
 Popular models with vision support:
-- `anthropic/claude-sonnet-4.5` - **Recommended for scientific vision**
-- `anthropic/claude-opus-4.5` - Advanced vision model
-- `openai/gpt-4o` - GPT-4 Omni
-- `openai/gpt-4-vision` - GPT-4 Vision
-- `google/gemini-pro-vision` - Gemini Pro Vision
+- `anthropic/claude-opus-4.5` - **Recommended for scientific vision**
+- `google/gemini-3-pro-preview` - Gemini Pro Vision
 
 See https://openrouter.ai/models for the complete list.
 

@@ -269,7 +266,7 @@ Be precise and technical.
 
 md = MarkItDown(
     llm_client=client,
-    llm_model="anthropic/claude-sonnet-4.5",
+    llm_model="anthropic/claude-opus-4.5",
     llm_prompt=scientific_prompt
 )
 ```

@@ -71,7 +71,7 @@ def convert_with_ai(
     input_file: Path,
     output_file: Path,
     api_key: str,
-    model: str = "anthropic/claude-sonnet-4.5",
+    model: str = "anthropic/claude-opus-4.5",
     prompt_type: str = "general",
     custom_prompt: str = None
 ) -> bool:

@@ -82,7 +82,7 @@ def convert_with_ai(
         input_file: Path to input file
         output_file: Path to output Markdown file
         api_key: OpenRouter API key
-        model: Model name (default: anthropic/claude-sonnet-4.5)
+        model: Model name (default: anthropic/claude-opus-4.5)
         prompt_type: Type of prompt to use
         custom_prompt: Custom prompt (overrides prompt_type)
 
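
These two hunks only swap the default model. For context, a hedged sketch of calling convert_with_ai directly with the new default, based solely on the signature and docstring above (the paths and key are placeholders, and the function is assumed to be imported from the script):

```python
from pathlib import Path

ok = convert_with_ai(
    input_file=Path("paper.pdf"),        # placeholder input
    output_file=Path("paper.md"),        # placeholder output
    api_key="sk-or-v1-...",              # OpenRouter API key
    model="anthropic/claude-opus-4.5",   # new default after this commit
    prompt_type="scientific",            # same prompt type as the CLI examples below
)
print("Converted" if ok else "Conversion failed")
```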

@@ -154,10 +154,10 @@ Examples:
   python convert_with_ai.py paper.pdf output.md --prompt-type scientific
 
   # Convert a presentation with custom model
-  python convert_with_ai.py slides.pptx slides.md --model anthropic/claude-sonnet-4.5 --prompt-type presentation
+  python convert_with_ai.py slides.pptx slides.md --model anthropic/claude-opus-4.5 --prompt-type presentation
 
   # Use custom prompt with advanced vision model
-  python convert_with_ai.py diagram.png diagram.md --model anthropic/claude-sonnet-4.5 --custom-prompt "Describe this technical diagram"
+  python convert_with_ai.py diagram.png diagram.md --model anthropic/claude-opus-4.5 --custom-prompt "Describe this technical diagram"
 
   # Set API key via environment variable
   export OPENROUTER_API_KEY="sk-or-v1-..."

@@ -167,11 +167,8 @@ Environment Variables:
   OPENROUTER_API_KEY    OpenRouter API key (required if not passed via --api-key)
 
 Popular Models (use with --model):
-  anthropic/claude-sonnet-4.5 - Recommended for scientific vision
-  anthropic/claude-opus-4.5 - Advanced vision model
-  openai/gpt-4o - GPT-4 Omni (vision support)
-  openai/gpt-4-vision - GPT-4 Vision
-  google/gemini-pro-vision - Gemini Pro Vision
+  anthropic/claude-opus-4.5 - Recommended for scientific vision
+  google/gemini-3-pro-preview - Gemini Pro Vision
 """
 )
 

@@ -183,8 +180,8 @@ Popular Models (use with --model):
 )
 parser.add_argument(
     '--model', '-m',
-    default='anthropic/claude-sonnet-4.5',
-    help='Model to use via OpenRouter (default: anthropic/claude-sonnet-4.5)'
+    default='anthropic/claude-opus-4.5',
+    help='Model to use via OpenRouter (default: anthropic/claude-opus-4.5)'
 )
 parser.add_argument(
     '--prompt-type', '-t',

@@ -67,7 +67,7 @@ result = npa.analyze_unit_visually(
     analyzer,
     unit_id=0,
     api_client=client,
-    model='claude-3-5-sonnet-20241022',
+    model='claude-opus-4.5',
     task='quality_assessment'
 )
 

@@ -287,7 +287,7 @@ Currently supported APIs:
 
 | Provider | Client | Model Examples |
 |----------|--------|----------------|
-| Anthropic | `anthropic.Anthropic()` | claude-3-5-sonnet-20241022 |
+| Anthropic | `anthropic.Anthropic()` | claude-opus-4.5 |
 | OpenAI | `openai.OpenAI()` | gpt-4-vision-preview |
 | Google | `google.generativeai` | gemini-pro-vision |
 
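
A hedged sketch connecting the table above to the earlier analyze_unit_visually call; it assumes npa and a prepared SortingAnalyzer named analyzer are already in scope as in that snippet, and that ANTHROPIC_API_KEY is set in the environment:

```python
import anthropic

# Anthropic client as listed in the table; reads ANTHROPIC_API_KEY from the environment.
client = anthropic.Anthropic()

result = npa.analyze_unit_visually(
    analyzer,                  # existing SortingAnalyzer
    unit_id=0,
    api_client=client,
    model='claude-opus-4.5',   # model name used after this commit
    task='quality_assessment',
)
print(result)
```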

@@ -284,7 +284,7 @@ npa.analyze_unit_visually(
     analyzer: SortingAnalyzer,
     unit_id: int,
     api_client: Any = None,
-    model: str = 'claude-3-5-sonnet-20241022',
+    model: str = 'claude-opus-4.5',
     task: str = 'quality_assessment',
     custom_prompt: str = None,
 ) -> dict

@@ -304,7 +304,7 @@ npa.batch_visual_curation(
     analyzer: SortingAnalyzer,
     unit_ids: list = None,
     api_client: Any = None,
-    model: str = 'claude-3-5-sonnet-20241022',
+    model: str = 'claude-opus-4.5',
     output_dir: str = None,
     progress_callback: callable = None,
 ) -> dict
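
A hedged usage sketch for batch_visual_curation based only on the signature above; the unit subset, output directory, and progress-callback shape are assumptions:

```python
def on_progress(done, total):
    # Assumed callback shape; the real signature is not shown in this diff.
    print(f"curated {done}/{total} units")

results = npa.batch_visual_curation(
    analyzer,                        # existing SortingAnalyzer
    unit_ids=[0, 1, 2],              # placeholder subset; None presumably curates all units
    api_client=client,
    model='claude-opus-4.5',         # model name used after this commit
    output_dir='curation_reports',   # placeholder directory
    progress_callback=on_progress,
)
```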