import { useCallback, useEffect, useState } from 'react';
import { useAsync } from 'react-use';
import { Subscription } from 'rxjs';

import { llms } from '@grafana/experimental';
import { logError } from '@grafana/runtime';
import { useAppNotification } from 'app/core/copy/appNotification';

import { isLLMPluginEnabled, DEFAULT_OAI_MODEL } from './utils';
// Declared here instead of imported from utils to keep this hook modular.
// Ideally the hook itself will be moved to a different scope later.
type Message = llms.openai.Message;
export enum StreamStatus {
  IDLE = 'idle',
  GENERATING = 'generating',
  COMPLETED = 'completed',
}

// How long (in ms) to wait for the first streamed reply before treating the request as timed out.
export const TIMEOUT = 10000;
// TODO: Add tests
export function useOpenAIStream(
  model = DEFAULT_OAI_MODEL,
  temperature = 1
): {
  setMessages: React.Dispatch<React.SetStateAction<Message[]>>;
  setStopGeneration: React.Dispatch<React.SetStateAction<boolean>>;
  messages: Message[];
  reply: string;
  streamStatus: StreamStatus;
  error: Error | undefined;
  value:
    | {
        enabled: boolean | undefined;
        stream?: undefined;
      }
    | {
        enabled: boolean | undefined;
        stream: Subscription;
      }
    | undefined;
} {
  // The messages array to send to the LLM, updated when the button is clicked.
  const [messages, setMessages] = useState<Message[]>([]);
  const [stopGeneration, setStopGeneration] = useState(false);
  // The latest reply from the LLM.
  const [reply, setReply] = useState('');
  const [streamStatus, setStreamStatus] = useState<StreamStatus>(StreamStatus.IDLE);
  const [error, setError] = useState<Error>();
  const { error: notifyError } = useAppNotification();
  const onError = useCallback(
    (e: Error) => {
      setStreamStatus(StreamStatus.IDLE);
      setMessages([]);
      setStopGeneration(false);
      setError(e);
      notifyError(
        'Failed to generate content using OpenAI',
        'Please try again or if the problem persists, contact your organization admin.'
      );
      console.error(e);
      logError(e, { messages: JSON.stringify(messages), model, temperature: String(temperature) });
    },
    [messages, model, temperature, notifyError]
  );
  const { error: enabledError, value: enabled } = useAsync(
    async () => await isLLMPluginEnabled(),
    [isLLMPluginEnabled]
  );
  const { error: asyncError, value } = useAsync(async () => {
    if (!enabled || !messages.length) {
      return { enabled };
    }

    setStreamStatus(StreamStatus.GENERATING);
    setError(undefined);
    // Stream the completions. Each element is the next stream chunk.
    const stream = llms.openai
      .streamChatCompletions({
        model,
        temperature,
        messages,
      })
      .pipe(
        // Accumulate the stream content into a stream of strings, where each
        // element contains the accumulated message so far.
        llms.openai.accumulateContent()
        // The stream is just a regular Observable, so we can use standard rxjs
        // functionality to update state, e.g. recording when the stream
        // has completed.
        // The operator decision tree on the rxjs website is a useful resource:
        // https://rxjs.dev/operator-decision-tree.
      );
    // Subscribe to the stream and update the state for each returned value.
    return {
      enabled,
      stream: stream.subscribe({
        next: setReply,
        error: onError,
        complete: () => {
          setStreamStatus(StreamStatus.COMPLETED);
          // Reset to idle on the next tick so consumers can still observe the completed state.
          setTimeout(() => {
            setStreamStatus(StreamStatus.IDLE);
          });
          setMessages([]);
          setStopGeneration(false);
          setError(undefined);
        },
      }),
    };
  }, [messages, enabled]);
  // Unsubscribe from the stream when the component unmounts.
  useEffect(() => {
    return () => {
      if (value?.stream) {
        value.stream.unsubscribe();
      }
    };
  }, [value]);
  // Unsubscribe from the stream when the user stops the generation.
  useEffect(() => {
    if (stopGeneration) {
      value?.stream?.unsubscribe();
      setStreamStatus(StreamStatus.IDLE);
      setStopGeneration(false);
      setError(undefined);
      setMessages([]);
    }
  }, [stopGeneration, value?.stream]);
  // If the stream is generating but no reply has arrived within TIMEOUT, report a timeout error.
  useEffect(() => {
    let timeout: NodeJS.Timeout | undefined;
    if (streamStatus === StreamStatus.GENERATING && reply === '') {
      timeout = setTimeout(() => {
        onError(new Error(`OpenAI stream timed out after ${TIMEOUT}ms`));
      }, TIMEOUT);
    }
    return () => {
      timeout && clearTimeout(timeout);
    };
  }, [streamStatus, reply, onError]);
  if (asyncError || enabledError) {
    setError(asyncError || enabledError);
  }
  return {
    setMessages,
    setStopGeneration,
    messages,
    reply,
    streamStatus,
    error,
    value,
  };
}
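
// Minimal usage sketch (illustrative only, not part of this module): a hypothetical
// `GenAIButton` component that feeds messages into the hook and renders the streamed reply.
// Only the hook's return values defined above are assumed; the component, its props, and
// the markup are made up for illustration.
//
// function GenAIButton({ messages }: { messages: Message[] }) {
//   const { setMessages, setStopGeneration, reply, streamStatus, error } = useOpenAIStream();
//
//   if (streamStatus === StreamStatus.GENERATING) {
//     // Streaming: show progress on the partial reply and let the user cancel.
//     return <button onClick={() => setStopGeneration(true)}>Stop ({reply.length} chars so far)</button>;
//   }
//
//   return (
//     <div>
//       {error && <span>{error.message}</span>}
//       {/* Setting the messages kicks off a new streamChatCompletions request. */}
//       <button onClick={() => setMessages(messages)}>Generate</button>
//     </div>
//   );
// }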