--- Logging error ---
Traceback (most recent call last):
File "C:\__download\Python\Lib\logging\__init__.py", line 1110, in emit
msg = self.format(record)
^^^^^^^^^^^^^^^^^^^
File "C:\__download\Python\Lib\logging\__init__.py", line 953, in format
return fmt.format(record)
^^^^^^^^^^^^^^^^^^
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\structlog\stdlib.py", line 1087, in format
ed = p(logger, meth_name, cast(EventDict, ed))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\langgraph_api\logging.py", line 62, in __call__
and LEVELS[event_dict["level"].upper()] > LEVELS["INFO"]
~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'EXCEPTION'
Call stack:
File "C:\__download\Python\Lib\threading.py", line 995, in _bootstrap
self._bootstrap_inner()
File "C:\__download\Python\Lib\threading.py", line 1038, in _bootstrap_inner
self.run()
File "C:\__download\Python\Lib\threading.py", line 975, in run
self._target(*self._args, **self._kwargs)
File "C:\__download\Python\Lib\concurrent\futures\thread.py", line 83, in _worker
work_item.run()
File "C:\__download\Python\Lib\concurrent\futures\thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\structlog\stdlib.py", line 400, in <lambda>
lambda: ctx.run(lambda: meth(event, *args, **kw)),
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\structlog\stdlib.py", line 400, in <lambda>
lambda: ctx.run(lambda: meth(event, *args, **kw)),
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\structlog\stdlib.py", line 224, in exception
return self._proxy_to_logger("exception", event, *args, **kw)
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\structlog\stdlib.py", line 254, in _proxy_to_logger
return super()._proxy_to_logger(method_name, event=event, **event_kw)
File "C:\_code\ollama-deep-researcher-1\.venv\Lib\site-packages\structlog\_base.py", line 217, in _proxy_to_logger
return getattr(self._logger, method_name)(*args, **kw)
Message: {'run_id': '1eff58d3-f85f-6062-bfb7-64f4a9f27ca1', 'run_attempt': 1, 'run_created_at': '2025-02-28T04:34:07.672099+00:00', 'run_started_at': '2025-02-28T04:34:08.351574+00:00', 'run_ended_at': '2025-02-28T04:34:09.038877+00:00', 'run_exec_ms': 687, 'event': 'Background run failed', 'logger': 'langgraph_api.queue', 'level': 'exception', 'api_variant': 'local_dev', 'timestamp': '2025-02-28T04:34:09.039876Z', 'exception': 'Traceback (most recent call last):\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph_api\\queue.py", line 266, in worker\n await asyncio.wait_for(consume(stream, run_id), timeout)\n File "C:\\__download\\Python\\Lib\\asyncio\\tasks.py", line 479, in wait_for\n return fut.result()\n ^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph_api\\stream.py", line 292, in consume\n raise e from None\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph_api\\stream.py", line 282, in consume\n async for mode, payload in stream:\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph_api\\stream.py", line 233, in astream_state\n event = await wait_if_not_done(anext(stream, sentinel), done)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph_api\\asyncio.py", line 72, in wait_if_not_done\n raise e.exceptions[0] from None\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph\\pregel\\__init__.py", line 2274, in astream\n async for _ in runner.atick(\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph\\pregel\\runner.py", line 527, in atick\n _panic_or_proceed(\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph\\pregel\\runner.py", line 619, in _panic_or_proceed\n raise exc\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph\\pregel\\retry.py", line 128, in arun_with_retry\n return await task.proc.ainvoke(task.input, config)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py", line 583, in ainvoke\n input = await step.ainvoke(input, config, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py", line 371, in ainvoke\n ret = await asyncio.create_task(coro, context=context)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\config.py", line 588, in run_in_executor\n return await asyncio.get_running_loop().run_in_executor(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\__download\\Python\\Lib\\concurrent\\futures\\thread.py", line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\config.py", line 579, in wrapper\n return func(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\./src/assistant/graph.py", line 25, in generate_query\n result = llm_json_mode.invoke(\n ^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py", line 284, in invoke\n self.generate_prompt(\n File 
"C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py", line 860, in generate_prompt\n return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py", line 690, in generate\n self._generate_with_cache(\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py", line 913, in _generate_with_cache\n for chunk in self._stream(messages, stop=stop, **kwargs):\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_ollama\\chat_models.py", line 722, in _stream\n for stream_resp in self._create_chat_stream(messages, stop, **kwargs):\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\langchain_ollama\\chat_models.py", line 589, in _create_chat_stream\n yield from self._client.chat(**chat_params)\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\ollama\\_client.py", line 163, in inner\n with self._client.stream(*args, **kwargs) as r:\n File "C:\\__download\\Python\\Lib\\contextlib.py", line 137, in __enter__\n return next(self.gen)\n ^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_client.py", line 868, in stream\n response = self.send(\n ^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_client.py", line 914, in send\n response = self._send_handling_auth(\n ^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_client.py", line 942, in _send_handling_auth\n response = self._send_handling_redirects(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_client.py", line 979, in _send_handling_redirects\n response = self._send_single_request(request)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_client.py", line 1014, in _send_single_request\n response = transport.handle_request(request)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_transports\\default.py", line 249, in handle_request\n with map_httpcore_exceptions():\n File "C:\\__download\\Python\\Lib\\contextlib.py", line 155, in __exit__\n self.gen.throw(typ, value, traceback)\n File "C:\\_code\\ollama-deep-researcher-1\\.venv\\Lib\\site-packages\\httpx\\_transports\\default.py", line 118, in map_httpcore_exceptions\n raise mapped_exc(message) from exc\nhttpx.UnsupportedProtocol: Request URL is missing an \'http://\' or \'https://\' protocol.\nDuring task with name \'generate_query\' and id \'2963ff51-2913-5448-6659-1703309ccc55\''}
Arguments: ()
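As far as I can tell, the real failure is buried inside that message: httpx.UnsupportedProtocol: Request URL is missing an 'http://' or 'https://' protocol, raised from the generate_query node while the Ollama chat call was being made. The KeyError: 'EXCEPTION' above looks like a secondary problem where the log formatter does not recognize the "exception" level while trying to report that failure. To illustrate what httpx means by that message, here is a minimal sketch of my own (not code from the repo, using a made-up URL):

import httpx

# Minimal sketch: httpx only accepts absolute URLs with an explicit scheme.
# A URL with no "http://" / "https://" prefix never reaches the server and
# raises the same exception seen in the log above.
try:
    httpx.get("example.com/api/tags")  # hypothetical URL, note the missing scheme
except httpx.UnsupportedProtocol as exc:
    print(exc)  # Request URL is missing an 'http://' or 'https://' protocol.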
Did I do something wrong with the uv installation?
These are the commands I ran after cloning the repo in VS Code on Windows (a quick Ollama connectivity check is sketched right after the list):
1. uv venv --python 3.11
2. .venv\Scripts\activate
3. uv pip install -e .
4. uv pip install langgraph-cli[inmem]
5. cp .env.example .env
6. uv run langgraph dev
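For context, here is a small sanity check for whether Ollama itself is reachable, written as a minimal sketch that assumes the default local endpoint http://localhost:11434 (adjust the host/port if your installation differs):

import httpx

# Minimal sanity check (assumes Ollama's default local endpoint).
# GET /api/tags lists the models that have been pulled.
OLLAMA_URL = "http://localhost:11434"  # note the explicit http:// scheme

resp = httpx.get(f"{OLLAMA_URL}/api/tags", timeout=5.0)
resp.raise_for_status()
print([m["name"] for m in resp.json().get("models", [])])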
.env file:

The generate_query step does not work.

The terminal shows a logging error.
2025-02-28T04:34:08.472770Z [info ] GET /assistants/a6ab75b8-fb3d-5c2c-a436-2fee55e33a06/subgraphs 200 14ms [langgraph_api.server] api_variant=local_dev latency_ms=14 method=GET path=/assistants/a6ab75b8-fb3d-5c2c-a436-2fee55e33a06/subgraphs path_params={'assistant_id': 'a6ab75b8-fb3d-5c2c-a436-2fee55e33a06'} proto=1.1 query_string=recurse=true req_header={'host': '127.0.0.1:2024', 'connection': 'keep-alive', 'x-auth-scheme': 'langsmith', 'sec-ch-ua-platform': '"Windows"', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0', 'sec-ch-ua': '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"', 'sec-ch-ua-mobile': '?0', 'accept': '*/*', 'origin': 'https://smith.langchain.com', 'sec-fetch-site': 'cross-site', 'sec-fetch-mode': 'cors', 'sec-fetch-dest': 'empty', 'accept-encoding': 'gzip, deflate, br, zstd', 'accept-language': 'en-US,en;q=0.9,ms;q=0.8,en-GB;q=0.7'} res_header={'content-length': '2', 'content-type': 'application/json'} route=/assistants/{assistant_id}/subgraphs status=200