diff --git a/app/client/src/pages/Evaluator/EvaluatorPage.tsx b/app/client/src/pages/Evaluator/EvaluatorPage.tsx
index aee2052..0b8a003 100644
--- a/app/client/src/pages/Evaluator/EvaluatorPage.tsx
+++ b/app/client/src/pages/Evaluator/EvaluatorPage.tsx
@@ -5,7 +5,7 @@ import React, { useEffect, useState } from 'react';
import { useMutation } from 'react-query';
import { useParams } from 'react-router-dom';
import { ModelParameters } from '../../types';
-import { Form, FormInstance } from 'antd';
+import { Button, Form, FormInstance, Result } from 'antd';
import { useGetDataset, useModels } from './hooks';
import EvaluateDataset from './EvaluateDataset';
import EvaluatorSuccess from './EvaluatorSuccess';
@@ -18,7 +18,7 @@ const EvaluatorPage: React.FC = () => {
const [form] = Form.useForm();
const { generate_file_name } = useParams();
const [viewType, setViewType] = useState(ViewType.EVALUATE_F0RM)
- const [ ,setErrorMessage] = useState(null);
+  const [errorMessage, setErrorMessage] = useState(null);
const [loading, setLoading] = useState(false);
const [evaluateResult, setEvaluateResult] = useState(null);
const { dataset, prompt, examples } = useGetDataset(generate_file_name as string);
@@ -88,7 +88,7 @@ const onSubmit = async () => {
const resp = await evaluateDataset(formData);
console.log('resp', resp);
if (!isEmpty(resp.status) && resp.status === 'failed') {
- setErrorMessage(resp.error);
+ setErrorMessage(resp.error || resp.message);
}
setLoading(false);
if (resp.output_path || resp.job_name) {
@@ -101,6 +101,23 @@ const onSubmit = async () => {
setLoading(false);
}
}
+
+ if (errorMessage) {
+ return (
+ <>
+        <Result
+          status="error"
+          title={errorMessage}
+          extra={
+            <Button type="primary" onClick={() => setErrorMessage(null)}>
+              {'Start Over'}
+            </Button>
+          }
+        />
+      </>
+ )
+ }
return (
<>
diff --git a/app/client/src/pages/Evaluator/EvaluatorSuccess.tsx b/app/client/src/pages/Evaluator/EvaluatorSuccess.tsx
index 3a7ad17..06ece55 100644
--- a/app/client/src/pages/Evaluator/EvaluatorSuccess.tsx
+++ b/app/client/src/pages/Evaluator/EvaluatorSuccess.tsx
@@ -117,7 +117,7 @@ const EvaluatorSuccess: React.FC = ({ result, dataset, demo }) => {
diff --git a/app/client/src/pages/Evaluator/ReevaluatorPage.tsx b/app/client/src/pages/Evaluator/ReevaluatorPage.tsx
index 5eee54e..5351eda 100644
--- a/app/client/src/pages/Evaluator/ReevaluatorPage.tsx
+++ b/app/client/src/pages/Evaluator/ReevaluatorPage.tsx
@@ -1,6 +1,6 @@
import get from 'lodash/get';
import isEmpty from 'lodash/isEmpty';
-import { Form, FormInstance } from "antd";
+import { Button, Form, FormInstance, Result } from "antd";
import { useEffect, useState } from "react";
import { useParams } from "react-router-dom";
import { useGetEvaluate, useModels } from "./hooks";
@@ -15,7 +15,7 @@ const ReevaluatorPage: React.FC = () => {
const { evaluate_file_name } = useParams();
const [viewType, setViewType] = useState(ViewType.REEVALUATE_F0RM);
const [loading, setLoading] = useState(false);
- const [ ,setErrorMessage] = useState(null);
+  const [errorMessage, setErrorMessage] = useState(null);
const [evaluateResult, setEvaluateResult] = useState(null);
const {
evaluate,
@@ -85,7 +85,7 @@ const ReevaluatorPage: React.FC = () => {
setLoading(true);
const resp = await evaluateDataset(formData);
if (!isEmpty(resp.status) && resp.status === 'failed') {
- setErrorMessage(resp.error);
+ setErrorMessage(resp.error || resp.message);
}
setLoading(false);
if (resp.output_path) {
@@ -99,6 +99,23 @@ const ReevaluatorPage: React.FC = () => {
}
}
+ if (errorMessage) {
+ return (
+ <>
+        <Result
+          status="error"
+          title={errorMessage}
+          extra={
+            <Button type="primary" onClick={() => setErrorMessage(null)}>
+              {'Start Over'}
+            </Button>
+          }
+        />
+      </>
+ )
+ }
+
return (
<>
{viewType === ViewType.REEVALUATE_F0RM &&
diff --git a/app/client/src/pages/Evaluator/SeedEvaluateTable.tsx b/app/client/src/pages/Evaluator/SeedEvaluateTable.tsx
index b272f79..1e45847 100644
--- a/app/client/src/pages/Evaluator/SeedEvaluateTable.tsx
+++ b/app/client/src/pages/Evaluator/SeedEvaluateTable.tsx
@@ -54,7 +54,7 @@ const SeedEvaluateTable: React.FC = ({ results }) => {
title: 'Score',
key: 'score',
dataIndex: 'score',
- width: 20,
+ width: 100,
render: (score: number) => {
        return <>{score}</>
}
diff --git a/app/client/src/pages/Evaluator/hooks.ts b/app/client/src/pages/Evaluator/hooks.ts
index 576e953..8a650ef 100644
--- a/app/client/src/pages/Evaluator/hooks.ts
+++ b/app/client/src/pages/Evaluator/hooks.ts
@@ -129,8 +129,8 @@ export const useGetEvaluate = (evaluate_file_name: string) => {
const evaluate = get(data, 'evaluate');
const dataset = get(data, 'dataset');
- const prompt = get(data, 'prompt');
- const examples = get(data, 'examples');
+ const prompt = get(evaluate, 'prompt') || get(evaluate, 'custom_prompt');
+ const examples = get(evaluate, 'examples');
return {
data,
diff --git a/app/client/src/pages/Evaluator/types.ts b/app/client/src/pages/Evaluator/types.ts
index 686585a..74d698f 100644
--- a/app/client/src/pages/Evaluator/types.ts
+++ b/app/client/src/pages/Evaluator/types.ts
@@ -85,4 +85,8 @@ export enum ViewType {
EVALUATE_F0RM = 'EVALUATE_F0RM',
REEVALUATE_F0RM = 'REEVALUATE_F0RM',
SUCCESS_VIEW = 'SUCCESS_VIEW'
+}
+
+export interface EvaluationDetails {
+ evaluation: EvaluateResult;
}
\ No newline at end of file
diff --git a/app/client/src/pages/Home/DatasetActions.tsx b/app/client/src/pages/Home/DatasetActions.tsx
index 30320ed..28e373e 100644
--- a/app/client/src/pages/Home/DatasetActions.tsx
+++ b/app/client/src/pages/Home/DatasetActions.tsx
@@ -76,12 +76,16 @@ const DatasetActions: React.FC = ({ dataset, refetch, setTo
const menuActions: MenuProps['items'] = [
{
key: '1',
-      label: (
-        <div>
+      // label: (
+      //   <div>
+      //     View Dataset Details
+      //   </div>
+      // ),
+      label:
+        <Link to={`/dataset/${dataset.generate_file_name}`}>
          View Dataset Details
-        </div>
-      ),
-      onClick: () => setShowModal(true),
+        </Link>,
+      // onClick: () => setShowModal(true),
icon:
},
{
diff --git a/app/client/src/pages/Home/EvaluateActions.tsx b/app/client/src/pages/Home/EvaluateActions.tsx
index e8c8e27..0aeb1bd 100644
--- a/app/client/src/pages/Home/EvaluateActions.tsx
+++ b/app/client/src/pages/Home/EvaluateActions.tsx
@@ -69,11 +69,10 @@ const EvaluationActions: React.FC = ({ evaluation, refetch }) => {
{
key: '1',
label: (
-        <div>
+        <Link to={`/evaluation/${evaluation.evaluate_file_name}`}>
          View Evaluation Details
-        </div>
+        </Link>
),
- onClick: () => setShowModal(true),
icon:
},
{
diff --git a/app/client/src/pages/Home/UpgradeButton.tsx b/app/client/src/pages/Home/UpgradeButton.tsx
new file mode 100644
index 0000000..e0da02e
--- /dev/null
+++ b/app/client/src/pages/Home/UpgradeButton.tsx
@@ -0,0 +1,74 @@
+import isEmpty from 'lodash/isEmpty';
+import { Alert, Button, Flex, Modal, Spin, Typography } from 'antd';
+import React, { useEffect } from 'react';
+import { useUpgradeStatus, useUpgradeSynthesisStudio } from './hooks';
+import { LoadingOutlined } from '@ant-design/icons';
+
+const { Text } = Typography;
+
+
+const UpgradeButton: React.FC = () => {
+ const [showModal, setShowModal] = React.useState(false);
+ const [loading, setLoading] = React.useState(false);
+ const [enableUpgrade, setEnableUpgrade] = React.useState(false);
+ const { data, isError } = useUpgradeStatus();
+ const { upgradeStudio, isLoading } = useUpgradeSynthesisStudio();
+
+ useEffect(() => {
+ if (!isEmpty(data)) {
+ setEnableUpgrade(data?.updates_available);
+ }
+ },[data, isLoading, isError]);
+
+ const onClick = () => setShowModal(true);
+
+
+ if (!enableUpgrade) {
+ return null;
+ }
+
+ const onFinish = async () => {
+ // logic to handle upgrade
+ upgradeStudio();
+ setLoading(true);
+ }
+
+ return (
+ <>
+      {enableUpgrade && <Button onClick={onClick}>{'Upgrade'}</Button>}
+      {showModal && (
+        <Modal
+          open={showModal}
+          onCancel={() => setShowModal(false)}
+          onOk={() => onFinish()}
+          okButtonProps={{disabled: loading}}
+          width={550}>
+          <Flex vertical>
+            <Text>
+              {`Are you sure you want to upgrade Synthesis Studio?`}
+            </Text>
+          </Flex>
+          {loading &&
+            <Spin indicator={<LoadingOutlined spin />} fullscreen />
+          }
+        </Modal>
+      )}
+    </>
+
+ )
+
+}
+
+export default UpgradeButton;
diff --git a/app/client/src/pages/Home/hooks.ts b/app/client/src/pages/Home/hooks.ts
index 15860a5..f791a37 100644
--- a/app/client/src/pages/Home/hooks.ts
+++ b/app/client/src/pages/Home/hooks.ts
@@ -1,6 +1,7 @@
+import { notification } from 'antd';
import isEmpty from 'lodash/isEmpty';
import { useState } from 'react';
-import { useQuery } from 'react-query';
+import { useMutation, useQuery } from 'react-query';
const BASE_API_URL = import.meta.env.VITE_AMP_URL;
@@ -93,4 +94,60 @@ export const useEvaluations = () => {
searchQuery,
setSearchQuery
};
-}
\ No newline at end of file
+}
+
+const fetchUpgradeStatus = async () => {
+ const upgrade_resp = await fetch(`${BASE_API_URL}/synthesis-studio/check-upgrade`, {
+ method: 'GET',
+ });
+ const upgradeStatus = await upgrade_resp.json();
+ return upgradeStatus;
+}
+
+export const useUpgradeStatus = () => {
+ const { data, isLoading, isError, refetch } = useQuery(
+    ["fetchUpgradeStatus"],
+ () => fetchUpgradeStatus(),
+ {
+ keepPreviousData: false,
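+      // poll every 30s so a newly available upgrade surfaces without a manual refresh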
+ refetchInterval: 30000
+ },
+ );
+
+ return {
+ data,
+ isLoading,
+ isError,
+ refetch
+ };
+}
+
+const upgradeSynthesisStudio = async () => {
+ const upgrade_resp = await fetch(`${BASE_API_URL}/synthesis-studio/upgrade`, {
+ method: 'POST',
+ });
+ const body = await upgrade_resp.json();
+ return body;
+};
+
+export const useUpgradeSynthesisStudio = () => {
+  const mutation = useMutation({
+    mutationFn: upgradeSynthesisStudio,
+    // notify from the mutation's onError callback rather than firing on every render
+    onError: () => {
+      notification.error({
+        message: 'Error',
+        description: 'An error occurred while starting the upgrade action.'
+      });
+    }
+  });
+
+ return {
+ upgradeStudio: mutation.mutate,
+ fetching: mutation.isLoading,
+ error: mutation.error,
+ isError: mutation.isError,
+ data: mutation.data
+ };
+}
diff --git a/app/client/src/pages/Home/types.ts b/app/client/src/pages/Home/types.ts
index b30dc16..d5c31b2 100644
--- a/app/client/src/pages/Home/types.ts
+++ b/app/client/src/pages/Home/types.ts
@@ -23,4 +23,12 @@ export interface Evaluation {
job_id: string;
job_name: string;
job_status: string;
+}
+
+export interface DatasetDetails {
+ generation: DatasetGeneration;
+}
+
+export interface DatasetGeneration {
+ [key: string]: string;
}
\ No newline at end of file
diff --git a/app/client/src/routes.tsx b/app/client/src/routes.tsx
index cac2e23..21e1719 100644
--- a/app/client/src/routes.tsx
+++ b/app/client/src/routes.tsx
@@ -8,6 +8,7 @@ import ReevaluatorPage from "./pages/Evaluator/ReevaluatorPage";
import DatasetDetailsPage from "./pages/DatasetDetails/DatasetDetailsPage";
import WelcomePage from "./pages/Home/WelcomePage";
import ErrorPage from "./pages/ErrorPage";
+import EvaluationDetailsPage from "./pages/EvaluationDetails/EvaluationDetailsPage";
const router = createBrowserRouter([
@@ -49,6 +50,12 @@ const router = createBrowserRouter([
      errorElement: <ErrorPage />,
loader: async () => null
},
+ {
+ path: `evaluation/:evaluate_file_name`,
+        element: <EvaluationDetailsPage />,
+        errorElement: <ErrorPage />,
+ loader: async () => null
+ },
{
path: `welcome`,
      element: <WelcomePage />,
diff --git a/app/client/src/types.ts b/app/client/src/types.ts
index 4f4ff69..04b3c40 100644
--- a/app/client/src/types.ts
+++ b/app/client/src/types.ts
@@ -5,7 +5,8 @@ export enum Pages {
HOME = 'home',
DATASETS = 'datasets',
WELCOME = 'welcome',
- FEEDBACK = 'feedback'
+ FEEDBACK = 'feedback',
+ UPGRADE = 'upgrade'
}
export enum ModelParameters {
diff --git a/docs/guides/evaluation_workflow.md b/docs/guides/evaluation_workflow.md
new file mode 100644
index 0000000..a6396b4
--- /dev/null
+++ b/docs/guides/evaluation_workflow.md
@@ -0,0 +1,100 @@
+# Evaluation Workflow:
+
+In this workflow we will see how we can evaluate the synthetic data generated in previous steps, using a Large Language Model (LLM) as a judge.
+
+Users can trigger an evaluation from the list view by choosing the evaluation action in the dropdown.
+
+
+
+
+## Evaluation Workflow
+
+Similar to generation, evaluation also allows users to specify the following:
+
+1. #### Display Name
+2. #### Model Provider: AWS Bedrock or Cloudera AI Inference
+3. #### Model ID: Claude, Llama, Mistral, etc.
+
+The Code Generation and Text2SQL templates allow users to select from already curated prompts and examples to evaluate datasets.
+
+The Custom template, on the other hand, allows users to define everything from scratch to evaluate the synthetic dataset created in the previous step.
+
+The screenshot below shows this step:
+
+
+
+### Prompt and Model Parameters
+
+#### Prompt:
+This step allows users to curate their prompt manually, choose from the given templates, or let the LLM curate a prompt based on a description of their use case.
+
+```json
+{
+"""Below is a Python coding Question and Solution pair generated by an LLM. Evaluate its quality as a Senior Developer would, considering its suitability for professional use. Use the additive 5-point scoring system described below.
+
+Points are accumulated based on the satisfaction of each criterion:
+ 1. Add 1 point if the code implements basic functionality and solves the core problem, even if it includes some minor issues or non-optimal approaches.
+ 2. Add another point if the implementation is generally correct but lacks refinement in style or fails to follow some best practices. It might use inconsistent naming conventions or have occasional inefficiencies.
+ 3. Award a third point if the code is appropriate for professional use and accurately implements the required functionality. It demonstrates good understanding of Python concepts and common patterns, though it may not be optimal. It resembles the work of a competent developer but may have room for improvement in efficiency or organization.
+ 4. Grant a fourth point if the code is highly efficient and follows Python best practices, exhibiting consistent style and appropriate documentation. It could be similar to the work of an experienced developer, offering robust error handling, proper type hints, and effective use of built-in features. The result is maintainable, well-structured, and valuable for production use.
+ 5. Bestow a fifth point if the code is outstanding, demonstrating mastery of Python and software engineering principles. It includes comprehensive error handling, efficient algorithms, proper testing considerations, and excellent documentation. The solution is scalable, performant, and shows attention to edge cases and security considerations."""
+}
+```
+
+
+#### Model Parameters
+
+We let the user decide on the following model parameters:
+
+- **Temperature**
+- **TopK**
+- **TopP**
+
+
+
+### Examples:
+
+In the next step, users can specify the examples they want to provide for the evaluation of their dataset, so that the LLM follows the same format and judges/rates the dataset accordingly.
+
+The **score** and **justification** formats are defined by the user within the prompt and examples. For example, this use case uses a 5-point rating system, but users could instead choose a **10-point rating**, a **Boolean**, or a **subjective scale ("Bad", "Good", "Average")**, etc.
+
+Evaluation examples look like the following:
+
+```json
+{
+ "score": 3,
+ "justification": """The code achieves 3 points by implementing core functionality correctly (1),
+ showing generally correct implementation with proper syntax (2),
+ and being suitable for professional use with good Python patterns and accurate functionality (3).
+ While it demonstrates competent development practices, it lacks the robust error handling
+ and type hints needed for point 4, and could benefit from better efficiency optimization and code organization."""
+ },
+ {
+ "score": 4,
+ "justification": """
+ The code earns 4 points by implementing basic functionality (1), showing correct implementation (2),
+ being production-ready (3), and demonstrating high efficiency with Python best practices
+ including proper error handling, type hints, and clear documentation (4).
+ It exhibits experienced developer qualities with well-structured code and maintainable design, though
+ it lacks the comprehensive testing and security considerations needed for a perfect score."""
+}
+```
+
+
+
+### Final Output:
+
+Finally, the user can see what the output looks like, with corresponding scores and justifications.
+
+The output is saved to the Project File System within the Cloudera environment.
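+
+As a rough sketch (the exact saved schema may differ), an evaluated record might look like:
+
+```json
+{
+  "question": "How do you read a CSV file into a pandas DataFrame?",
+  "solution": "import pandas as pd\ndf = pd.read_csv('data.csv')",
+  "evaluation": {
+    "score": 4,
+    "justification": "Implements core functionality correctly, follows best practices, and includes proper error handling."
+  }
+}
+```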
+
+
+
+The output and its corresponding metadata (scores, model, etc.) can also be seen in the **Evaluations** list view, as shown in the screenshot below.
+
+
+
+
+
+
+
diff --git a/docs/guides/screenshots/evaluate_home_page.png b/docs/guides/screenshots/evaluate_home_page.png
new file mode 100644
index 0000000..501d105
Binary files /dev/null and b/docs/guides/screenshots/evaluate_home_page.png differ
diff --git a/docs/guides/screenshots/evaluate_list.png b/docs/guides/screenshots/evaluate_list.png
new file mode 100644
index 0000000..caf1635
Binary files /dev/null and b/docs/guides/screenshots/evaluate_list.png differ
diff --git a/docs/guides/screenshots/evaluate_output.png b/docs/guides/screenshots/evaluate_output.png
new file mode 100644
index 0000000..655898c
Binary files /dev/null and b/docs/guides/screenshots/evaluate_output.png differ
diff --git a/docs/guides/screenshots/evaluation_sds.png b/docs/guides/screenshots/evaluation_sds.png
new file mode 100644
index 0000000..4167cbc
Binary files /dev/null and b/docs/guides/screenshots/evaluation_sds.png differ
diff --git a/docs/guides/screenshots/export_list.png b/docs/guides/screenshots/export_list.png
new file mode 100644
index 0000000..6e0d079
Binary files /dev/null and b/docs/guides/screenshots/export_list.png differ
diff --git a/docs/guides/screenshots/sds_examples.png b/docs/guides/screenshots/sds_examples.png
new file mode 100644
index 0000000..cc84918
Binary files /dev/null and b/docs/guides/screenshots/sds_examples.png differ
diff --git a/docs/guides/screenshots/sds_export.png b/docs/guides/screenshots/sds_export.png
new file mode 100644
index 0000000..5ecc272
Binary files /dev/null and b/docs/guides/screenshots/sds_export.png differ
diff --git a/docs/guides/screenshots/sds_generation.png b/docs/guides/screenshots/sds_generation.png
new file mode 100644
index 0000000..6f3aaa1
Binary files /dev/null and b/docs/guides/screenshots/sds_generation.png differ
diff --git a/docs/guides/screenshots/sds_hf_export.png b/docs/guides/screenshots/sds_hf_export.png
new file mode 100644
index 0000000..825c381
Binary files /dev/null and b/docs/guides/screenshots/sds_hf_export.png differ
diff --git a/docs/guides/screenshots/sds_home_page.png b/docs/guides/screenshots/sds_home_page.png
new file mode 100644
index 0000000..c5215c5
Binary files /dev/null and b/docs/guides/screenshots/sds_home_page.png differ
diff --git a/docs/guides/screenshots/sds_output.png b/docs/guides/screenshots/sds_output.png
new file mode 100644
index 0000000..42dfc2f
Binary files /dev/null and b/docs/guides/screenshots/sds_output.png differ
diff --git a/docs/guides/screenshots/sds_prompt.png b/docs/guides/screenshots/sds_prompt.png
new file mode 100644
index 0000000..9712460
Binary files /dev/null and b/docs/guides/screenshots/sds_prompt.png differ
diff --git a/docs/guides/screenshots/sds_summary.png b/docs/guides/screenshots/sds_summary.png
new file mode 100644
index 0000000..9632a7e
Binary files /dev/null and b/docs/guides/screenshots/sds_summary.png differ
diff --git a/docs/guides/sft_workflow.md b/docs/guides/sft_workflow.md
new file mode 100644
index 0000000..c360d28
--- /dev/null
+++ b/docs/guides/sft_workflow.md
@@ -0,0 +1,116 @@
+# Generation Workflow:
+
+In this workflow we will see how we can create synthetic data for finetuning our models. Users can choose from the provided templates listed below.
+
+## Templates
+
+1. **Code Generation**
+2. **Text to SQL**
+3. **Custom**
+
+The Code Generation and Text2SQL templates allow users to select from already curated prompts, seeds (more on these below), and examples to produce datasets.
+
+The Custom template, on the other hand, allows users to define everything from scratch and create a synthetic dataset for their custom enterprise use cases.
+
+## Workflow Example: Code Generation
+
+### Home Page
+On the home page, the user can click **Create Datasets** to get started.
+
+
+
+### Generate Configuration
+
+In the next step, the user specifies the following fields:
+
+1. #### Display Name
+2. #### Model Provider: AWS Bedrock or Cloudera AI Inference
+3. #### Model ID: Claude, Llama, Mistral, etc.
+4. #### Workflow
+   a. Supervised Finetuning: generate prompt-and-completion pairs, with or without documents (PDF, DOC, TXT, etc.)
+   b. Custom Data Curation: takes a JSON array of inputs (which can be uploaded) from the user and generates a response for each entry. Users can supply their own inputs and instructions and get customized output for each corresponding input (see the sketch after this list).
+5. #### Files: input files the user can choose from their project file system for the above workflows
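+
+A minimal sketch of such a Custom Data Curation input array (the field names here are illustrative, not a fixed schema):
+
+```json
+[
+  {
+    "input": "Customer review: The checkout flow kept timing out on mobile.",
+    "instruction": "Classify the sentiment and suggest one improvement."
+  },
+  {
+    "input": "Customer review: Delivery arrived two days early, great packaging.",
+    "instruction": "Classify the sentiment and suggest one improvement."
+  }
+]
+```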
+
+
+
+### Prompt and Model Parameters
+
+#### Prompt:
+This step allows users to curate their prompt manually, choose from the given templates, or let the LLM curate a prompt based on a description of their use case.
+
+```json
+{
+"""Write a programming question-pair for the following topic:
+
+Requirements:
+- Each solution must include working code examples
+- Include explanations with the code
+- Follow the same format as the examples
+- Ensure code is properly formatted with appropriate indentation"""
+}
+```
+
+#### Seeds:
+
+This helps the LLM diversify the dataset the user wants to generate. We drew inspiration from the **[Self-Instruct paper](https://huggingface.co/papers/2212.10560)**, where 175 hand-crafted human seed instructions were used to diversify dataset curation.
+
+For example, for code generation, seeds can be:
+- **Algorithms for Operation Research**
+- **Web Development with Flask**
+- **PyTorch for Reinforcement Learning**
+
+Similarly, for language translation, seeds can be:
+- **Poems**
+- **Greetings in Formal Communication**
+- **Haikus**
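+
+As a sketch, the seeds above might be supplied to the workflow as a simple list (the field name is illustrative):
+
+```json
+{
+  "seeds": [
+    "Algorithms for Operation Research",
+    "Web Development with Flask",
+    "PyTorch for Reinforcement Learning"
+  ]
+}
+```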
+
+#### Model Parameters
+
+We let the user decide on the following model parameters:
+
+- **Temperature**
+- **TopK**
+- **TopP**
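+
+As an illustration of how these parameters map onto a request (the values are hypothetical, not recommended defaults):
+
+```json
+{
+  "temperature": 0.7,
+  "top_k": 250,
+  "top_p": 0.9
+}
+```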
+
+#### Dataset Size
+
+
+
+### Examples:
+
+In the next step, users can specify the examples they want to provide for their synthetic dataset generation, so that the LLM follows the same format and creates the dataset accordingly.
+
+The examples for code generation look like the following:
+
+```json
+{
+ "question": "How do you read a CSV file into a pandas DataFrame?",
+  "solution": """You can use pandas.read_csv(). Here's an example:
+
+ import pandas as pd
+ df = pd.read_csv('data.csv')
+ print(df.head())
+ print(df.info())
+"""
+}
+```
+
+
+
+
+### Summary:
+
+This lets the user take a final look at the prompt, seeds, dataset size, and other parameters they have selected for data generation.
+
+
+
+### Final Output:
+
+Finally, the user can see what the output looks like, with corresponding prompts and completions.
+
+The output is saved to the Project File System within the Cloudera environment.
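+
+As a rough sketch (the exact saved schema may differ), a generated record might look like:
+
+```json
+{
+  "question": "How do you reverse a list in Python?",
+  "solution": "Use slicing: my_list[::-1] returns a reversed copy of the list."
+}
+```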
+
+
+
+
+
+The output and its corresponding metadata (scores, model, etc.) can also be seen in the **Generations** list view, as shown in the screenshot below.
diff --git a/docs/technical_overview.md b/docs/technical_overview.md
index 7c7027c..abf9bdf 100644
--- a/docs/technical_overview.md
+++ b/docs/technical_overview.md
@@ -165,7 +165,7 @@ df = pd.read_csv('data.csv')\n
"""}]
-Write a programming question-pair for the following topic:
+Write a programming question-answer pair for the following topic:
Requirements:
- Each solution must include working code examples
- Include explanations with the code