Skip to content

Commit d095677

Browse files
Release 0.14.9 (#20330)
1 parent 0f9585d commit d095677

6 files changed

Lines changed: 210 additions & 6 deletions

File tree

CHANGELOG.md

Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,122 @@
22

33
<!--- generated changelog --->
44

5+
## [2025-12-02]
6+
7+
### llama-index-agent-azure [0.2.1]
8+
9+
- fix: Pin azure-ai-projects version to prevent breaking changes ([#20255](https://github.com/run-llama/llama_index/pull/20255))
10+
11+
### llama-index-core [0.14.9]
12+
13+
- MultiModalVectorStoreIndex now returns a multi-modal ContextChatEngine. ([#20265](https://github.com/run-llama/llama_index/pull/20265))
14+
- Ingestion to vector store now ensures that \_node-content is readable ([#20266](https://github.com/run-llama/llama_index/pull/20266))
15+
- fix: ensure context is copied with async utils run_async ([#20286](https://github.com/run-llama/llama_index/pull/20286))
16+
- fix(memory): ensure first message in queue is always a user message after flush ([#20310](https://github.com/run-llama/llama_index/pull/20310))
17+
18+
### llama-index-embeddings-bedrock [0.7.2]
19+
20+
- feat(embeddings-bedrock): Add support for Amazon Bedrock Application Inference Profiles ([#20267](https://github.com/run-llama/llama_index/pull/20267))
21+
- fix(embeddings-bedrock): correct extraction of provider from model_name ([#20295](https://github.com/run-llama/llama_index/pull/20295))
22+
- Bump version of bedrock-embedding ([#20304](https://github.com/run-llama/llama_index/pull/20304))
23+
24+
### llama-index-embeddings-voyageai [0.5.1]
25+
26+
- VoyageAI correction and documentation ([#20251](https://github.com/run-llama/llama_index/pull/20251))
27+
28+
### llama-index-llms-anthropic [0.10.3]
29+
30+
- feat: add anthropic opus 4.5 ([#20306](https://github.com/run-llama/llama_index/pull/20306))
31+
32+
### llama-index-llms-bedrock-converse [0.12.2]
33+
34+
- fix(bedrock-converse): Only use guardrail_stream_processing_mode in streaming functions ([#20289](https://github.com/run-llama/llama_index/pull/20289))
35+
- feat: add anthropic opus 4.5 ([#20306](https://github.com/run-llama/llama_index/pull/20306))
36+
- feat(bedrock-converse): Additional support for Claude Opus 4.5 ([#20317](https://github.com/run-llama/llama_index/pull/20317))
37+
38+
### llama-index-llms-google-genai [0.7.4]
39+
40+
- Fix gemini-3 support and gemini function call support ([#20315](https://github.com/run-llama/llama_index/pull/20315))
41+
42+
### llama-index-llms-helicone [0.1.1]
43+
44+
- update helicone docs + examples ([#20208](https://github.com/run-llama/llama_index/pull/20208))
45+
46+
### llama-index-llms-openai [0.6.10]
47+
48+
- Smallest Nit ([#20252](https://github.com/run-llama/llama_index/pull/20252))
49+
- Feat: Add gpt-5.1-chat model support ([#20311](https://github.com/run-llama/llama_index/pull/20311))
50+
51+
### llama-index-llms-ovhcloud [0.1.0]
52+
53+
- Add OVHcloud AI Endpoints provider ([#20288](https://github.com/run-llama/llama_index/pull/20288))
54+
55+
### llama-index-llms-siliconflow [0.4.2]
56+
57+
- [Bugfix] None check on content in delta in siliconflow LLM ([#20327](https://github.com/run-llama/llama_index/pull/20327))
58+
59+
### llama-index-node-parser-docling [0.4.2]
60+
61+
- Relax docling Python constraints ([#20322](https://github.com/run-llama/llama_index/pull/20322))
62+
63+
### llama-index-packs-resume-screener [0.9.3]
64+
65+
- feat: Update pypdf to latest version ([#20285](https://github.com/run-llama/llama_index/pull/20285))
66+
67+
### llama-index-postprocessor-voyageai-rerank [0.4.1]
68+
69+
- VoyageAI correction and documentation ([#20251](https://github.com/run-llama/llama_index/pull/20251))
70+
71+
### llama-index-protocols-ag-ui [0.2.3]
72+
73+
- fix: correct order of ag-ui events to avoid event conflicts ([#20296](https://github.com/run-llama/llama_index/pull/20296))
74+
75+
### llama-index-readers-confluence [0.6.0]
76+
77+
- Refactor Confluence integration: Update license to MIT, remove requirements.txt, and implement HtmlTextParser for HTML to Markdown conversion. Update dependencies and tests accordingly. ([#20262](https://github.com/run-llama/llama_index/pull/20262))
78+
79+
### llama-index-readers-docling [0.4.2]
80+
81+
- Relax docling Python constraints ([#20322](https://github.com/run-llama/llama_index/pull/20322))
82+
83+
### llama-index-readers-file [0.5.5]
84+
85+
- feat: Update pypdf to latest version ([#20285](https://github.com/run-llama/llama_index/pull/20285))
86+
87+
### llama-index-readers-reddit [0.4.1]
88+
89+
- Fix typo in README.md for Reddit integration ([#20283](https://github.com/run-llama/llama_index/pull/20283))
90+
91+
### llama-index-storage-chat-store-postgres [0.3.2]
92+
93+
- [FIX] Postgres ChatStore automatically prefix table name with "data\_" ([#20241](https://github.com/run-llama/llama_index/pull/20241))
94+
95+
### llama-index-vector-stores-azureaisearch [0.4.4]
96+
97+
- `vector-azureaisearch`: check if user agent is already in policy before adding it to the azure client ([#20243](https://github.com/run-llama/llama_index/pull/20243))
98+
- fix(azureaisearch): Add close/aclose methods to fix unclosed client session warnings ([#20309](https://github.com/run-llama/llama_index/pull/20309))
99+
100+
### llama-index-vector-stores-milvus [0.9.4]
101+
102+
- Fix/consistency level param for milvus ([#20268](https://github.com/run-llama/llama_index/pull/20268))
103+
104+
### llama-index-vector-stores-postgres [0.7.2]
105+
106+
- Fix postgresql dispose ([#20312](https://github.com/run-llama/llama_index/pull/20312))
107+
108+
### llama-index-vector-stores-qdrant [0.9.0]
109+
110+
- fix: Update qdrant-client version constraints ([#20280](https://github.com/run-llama/llama_index/pull/20280))
111+
- Feat: update Qdrant client to 1.16.0 ([#20287](https://github.com/run-llama/llama_index/pull/20287))
112+
113+
### llama-index-vector-stores-vertexaivectorsearch [0.3.2]
114+
115+
- fix: update blob path in batch_update_index ([#20281](https://github.com/run-llama/llama_index/pull/20281))
116+
117+
### llama-index-voice-agents-openai [0.2.2]
118+
119+
- Smallest Nit ([#20252](https://github.com/run-llama/llama_index/pull/20252))
120+
5121
## [2025-11-10]
6122

7123
### llama-index-core [0.14.8]
Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
# Helicone AI Gateway
2-
31
::: llama_index.llms.helicone
42
options:
53
members: - Helicone
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
::: llama_index.llms.ovhcloud
22
options:
3-
members: - OVHcloud AI Endpoints
3+
members: - OVHcloud

docs/src/content/docs/framework/CHANGELOG.md

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,96 @@ title: ChangeLog
44

55
<!--- generated changelog --->
66

7+
## [2025-11-10]
8+
9+
### llama-index-core [0.14.8]
10+
11+
- Fix ReActOutputParser getting stuck when "Answer:" contains "Action:" ([#20098](https://github.com/run-llama/llama_index/pull/20098))
12+
- Add buffer to image, audio, video and document blocks ([#20153](https://github.com/run-llama/llama_index/pull/20153))
13+
- fix(agent): Handle multi-block ChatMessage in ReActAgent ([#20196](https://github.com/run-llama/llama_index/pull/20196))
14+
- Fix/20209 ([#20214](https://github.com/run-llama/llama_index/pull/20214))
15+
- Preserve Exception in ToolOutput ([#20231](https://github.com/run-llama/llama_index/pull/20231))
16+
- fix weird pydantic warning ([#20235](https://github.com/run-llama/llama_index/pull/20235))
17+
18+
### llama-index-embeddings-nvidia [0.4.2]
19+
20+
- docs: Edit pass and update example model ([#20198](https://github.com/run-llama/llama_index/pull/20198))
21+
22+
### llama-index-embeddings-ollama [0.8.4]
23+
24+
- Added a test case (no code) to check the embedding through an actual connection to an Ollama server (after checking that the ollama server exists) ([#20230](https://github.com/run-llama/llama_index/pull/20230))
25+
26+
### llama-index-llms-anthropic [0.10.2]
27+
28+
- feat(llms/anthropic): Add support for RawMessageDeltaEvent in streaming ([#20206](https://github.com/run-llama/llama_index/pull/20206))
29+
- chore: remove unsupported models ([#20211](https://github.com/run-llama/llama_index/pull/20211))
30+
31+
### llama-index-llms-bedrock-converse [0.11.1]
32+
33+
- feat: integrate bedrock converse with tool call block ([#20099](https://github.com/run-llama/llama_index/pull/20099))
34+
- feat: Update model name extraction to include 'jp' region prefix and … ([#20233](https://github.com/run-llama/llama_index/pull/20233))
35+
36+
### llama-index-llms-google-genai [0.7.3]
37+
38+
- feat: google genai integration with tool block ([#20096](https://github.com/run-llama/llama_index/pull/20096))
39+
- fix: non-streaming gemini tool calling ([#20207](https://github.com/run-llama/llama_index/pull/20207))
40+
- Add token usage information in GoogleGenAI chat additional_kwargs ([#20219](https://github.com/run-llama/llama_index/pull/20219))
41+
- bug fix google genai stream_complete ([#20220](https://github.com/run-llama/llama_index/pull/20220))
42+
43+
### llama-index-llms-nvidia [0.4.4]
44+
45+
- docs: Edit pass and code example updates ([#20200](https://github.com/run-llama/llama_index/pull/20200))
46+
47+
### llama-index-llms-openai [0.6.8]
48+
49+
- FixV2: Correct DocumentBlock type for OpenAI from 'input_file' to 'file' ([#20203](https://github.com/run-llama/llama_index/pull/20203))
50+
- OpenAI v2 sdk support ([#20234](https://github.com/run-llama/llama_index/pull/20234))
51+
52+
### llama-index-llms-upstage [0.6.5]
53+
54+
- OpenAI v2 sdk support ([#20234](https://github.com/run-llama/llama_index/pull/20234))
55+
56+
### llama-index-packs-streamlit-chatbot [0.5.2]
57+
58+
- OpenAI v2 sdk support ([#20234](https://github.com/run-llama/llama_index/pull/20234))
59+
60+
### llama-index-packs-voyage-query-engine [0.5.2]
61+
62+
- OpenAI v2 sdk support ([#20234](https://github.com/run-llama/llama_index/pull/20234))
63+
64+
### llama-index-postprocessor-nvidia-rerank [0.5.1]
65+
66+
- docs: Edit pass ([#20199](https://github.com/run-llama/llama_index/pull/20199))
67+
68+
### llama-index-readers-web [0.5.6]
69+
70+
- feat: Add ScrapyWebReader Integration ([#20212](https://github.com/run-llama/llama_index/pull/20212))
71+
- Update Scrapy dependency to 2.13.3 ([#20228](https://github.com/run-llama/llama_index/pull/20228))
72+
73+
### llama-index-readers-whisper [0.3.0]
74+
75+
- OpenAI v2 sdk support ([#20234](https://github.com/run-llama/llama_index/pull/20234))
76+
77+
### llama-index-storage-kvstore-postgres [0.4.3]
78+
79+
- fix: Ensure schema creation only occurs if it doesn't already exist ([#20225](https://github.com/run-llama/llama_index/pull/20225))
80+
81+
### llama-index-tools-brightdata [0.2.1]
82+
83+
- docs: add api key claim instructions ([#20204](https://github.com/run-llama/llama_index/pull/20204))
84+
85+
### llama-index-tools-mcp [0.4.3]
86+
87+
- Added test case for issue 19211. No code change ([#20201](https://github.com/run-llama/llama_index/pull/20201))
88+
89+
### llama-index-utils-oracleai [0.3.1]
90+
91+
- Update llama-index-core dependency to 0.12.45 ([#20227](https://github.com/run-llama/llama_index/pull/20227))
92+
93+
### llama-index-vector-stores-lancedb [0.4.2]
94+
95+
- fix: FTS index recreation bug on every LanceDB query ([#20213](https://github.com/run-llama/llama_index/pull/20213))
96+
797
## [2025-10-30]
898

999
### llama-index-core [0.14.7]

llama-index-core/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ dev = [
3232

3333
[project]
3434
name = "llama-index-core"
35-
version = "0.14.8"
35+
version = "0.14.9"
3636
description = "Interface between LLMs and your data"
3737
authors = [{name = "Jerry Liu", email = "jerry@llamaindex.ai"}]
3838
requires-python = ">=3.9,<4.0"

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ classifiers = [
4040
]
4141
dependencies = [
4242
"llama-index-cli>=0.5.0,<0.6 ; python_version > '3.9'",
43-
"llama-index-core>=0.14.8,<0.15.0",
43+
"llama-index-core>=0.14.9,<0.15.0",
4444
"llama-index-embeddings-openai>=0.5.0,<0.6",
4545
"llama-index-indices-managed-llama-cloud>=0.4.0",
4646
"llama-index-llms-openai>=0.6.0,<0.7",
@@ -70,7 +70,7 @@ maintainers = [
7070
name = "llama-index"
7171
readme = "README.md"
7272
requires-python = ">=3.9,<4.0"
73-
version = "0.14.8"
73+
version = "0.14.9"
7474

7575
[project.scripts]
7676
llamaindex-cli = "llama_index.cli.command_line:main"

0 commit comments

Comments
 (0)