Additional information
"http://www.w3.org/TR/html4/loose.dtd">
type="text/css">
href="?__debugger__=yes&cmd=resource&f=console.png">
openai.error.ServiceUnavailableError
openai.error.ServiceUnavailableError: The server is overloaded or not ready yet.
Traceback (most recent call last)
File "/home/bitnami/.local/lib/python3.7/site-packages/flask/app.py",
line 2091,
in __call__
def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:
"""The WSGI server calls the Flask application object as the
WSGI application. This calls :meth:`wsgi_app`, which can be
wrapped to apply middleware.
"""
return self.wsgi_app(environ, start_response)
File "/home/bitnami/.local/lib/python3.7/site-packages/flask/app.py",
line 2076,
in wsgi_app
try:
ctx.push()
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.handle_exception(e)
except: # noqa: B001
error = sys.exc_info()[1]
raise
return response(environ, start_response)
finally:
File "/home/bitnami/.local/lib/python3.7/site-packages/flask/app.py",
line 2073,
in wsgi_app
ctx = self.request_context(environ)
error: t.Optional[BaseException] = None
try:
try:
ctx.push()
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.handle_exception(e)
except: # noqa: B001
error = sys.exc_info()[1]
File "/home/bitnami/.local/lib/python3.7/site-packages/flask/app.py",
line 1518,
in full_dispatch_request
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
return self.finalize_request(rv)
def finalize_request(
self,
rv: t.Union[ResponseReturnValue, HTTPException],
File "/home/bitnami/.local/lib/python3.7/site-packages/flask/app.py",
line 1516,
in full_dispatch_request
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
return self.finalize_request(rv)
def finalize_request(
File "/home/bitnami/.local/lib/python3.7/site-packages/flask/app.py",
line 1502,
in dispatch_request
getattr(rule, "provide_automatic_options", False)
and req.method == "OPTIONS"
):
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
def full_dispatch_request(self) -> Response:
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
File "/opt/bitnami/twitter_micalendariolaboral/openai/app.py",
line 147,
in tools_description
dominio = request.args.get('dominio')
if dominio:
prompt = openai.ChatCompletion.create(
model="gpt-4o-mini",
messages=generate_prompt_tools_description(dominio)
)
if prompt is not None and prompt.choices is not None and len(prompt.choices) > 0:
message = prompt.choices[0].message
response = app.response_class(
File "/home/bitnami/.local/lib/python3.7/site-packages/openai/api_resources/chat_completion.py",
line 25,
in create
start = time.time()
timeout = kwargs.pop("timeout", None)
while True:
try:
return super().create(*args, **kwargs)
except TryAgain as e:
if timeout is not None and time.time() > start + timeout:
raise
util.log_info("Waiting for model to warm up", error=e)
File "/home/bitnami/.local/lib/python3.7/site-packages/openai/api_resources/abstract/engine_api_resource.py",
line 160,
in create
url,
params=params,
headers=headers,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
if stream:
# must be an iterator
assert not isinstance(response, OpenAIResponse)
File "/home/bitnami/.local/lib/python3.7/site-packages/openai/api_requestor.py",
line 226,
in request
files=files,
stream=stream,
request_id=request_id,
request_timeout=request_timeout,
)
resp, got_stream = self._interpret_response(result, stream)
return resp, got_stream, self.api_key
@overload
async def arequest(
self,
File "/home/bitnami/.local/lib/python3.7/site-packages/openai/api_requestor.py",
line 624,
in _interpret_response
return (
self._interpret_response_line(
result.content.decode("utf-8"),
result.status_code,
result.headers,
stream=False,
),
False,
)
async def _interpret_async_response(
File "/home/bitnami/.local/lib/python3.7/site-packages/openai/api_requestor.py",
line 667,
in _interpret_response_line
if rcode == 503:
raise error.ServiceUnavailableError(
"The server is overloaded or not ready yet.",
rbody,
rcode,
headers=rheaders,
)
try:
if 'text/plain' in rheaders.get('Content-Type'):
data = rbody
else:
openai.error.ServiceUnavailableError: The server is overloaded or not ready yet.
This is the Copy/Paste friendly version of the traceback.
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error.
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.
Console Locked
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.

