From 26959563e5b30161a88ef0d74079a8a607cf0017 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Mon, 12 Aug 2024 11:49:53 -0700 Subject: [PATCH] Finetune ui improvements (#2053) * WIP finetune ui improvements * lint * update order details page finetuning ui * data upload, confirmation, and order placed finetune ui update * update finetune layout * remove unneeded imports * uncomment url * confirmation and data upload component ui updates * finetun main container layout fix --- .../FineTuning/Steps/Confirmation/index.jsx | 167 ++++++++------- .../FineTuning/Steps/DataUpload/index.jsx | 191 +++++++++--------- .../Steps/FulfillmentPolicy/index.jsx | 88 ++++---- .../FineTuning/Steps/Introduction/index.jsx | 75 +++---- .../FineTuning/Steps/OrderDetails/index.jsx | 120 ++++++----- .../FineTuning/Steps/OrderPlaced/index.jsx | 100 +++++---- .../pages/FineTuning/Steps/Privacy/index.jsx | 148 +++++++------- .../Steps/TermsAndConditions/index.jsx | 119 +++++------ frontend/src/pages/FineTuning/Steps/index.jsx | 18 +- frontend/src/pages/FineTuning/index.jsx | 34 ++-- 10 files changed, 541 insertions(+), 519 deletions(-) diff --git a/frontend/src/pages/FineTuning/Steps/Confirmation/index.jsx b/frontend/src/pages/FineTuning/Steps/Confirmation/index.jsx index acf4b409..cfb8d970 100644 --- a/frontend/src/pages/FineTuning/Steps/Confirmation/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/Confirmation/index.jsx @@ -1,9 +1,11 @@ import FineTuning from "@/models/experimental/fineTuning"; import { dollarFormat } from "@/utils/numbers"; import showToast from "@/utils/toast"; -import { CheckCircle } from "@phosphor-icons/react"; -import { useState } from "react"; +import { Check } from "@phosphor-icons/react"; +import { useState, useEffect } from "react"; import FineTuningSteps from "../index"; +import CTAButton from "@/components/lib/CTAButton"; +import Workspace from "@/models/workspace"; /** * @param {{settings: import("../index").OrderSettings}} param0 @@ -11,6 +13,18 @@ import FineTuningSteps from "../index"; */ export default function Confirmation({ settings, setSettings, setStep }) { const [loading, setLoading] = useState(false); + const [workspaces, setWorkspaces] = useState([]); + + useEffect(() => { + Workspace.all() + .then((fetchedWorkspaces) => { + setWorkspaces(fetchedWorkspaces); + }) + .catch(() => { + showToast("Failed to fetch workspaces", "error"); + }); + }, []); + async function handleCheckout() { setLoading(true); const data = await FineTuning.createOrder({ @@ -40,107 +54,124 @@ export default function Confirmation({ settings, setSettings, setStep }) { setStep(FineTuningSteps.confirmation.next()); } + const getWorkspaceName = (slug) => { + const workspace = workspaces.find((ws) => ws.slug === slug); + return workspace ? workspace.name : slug; + }; + return (
-
-
-

Confirm & Submit

-

+

+
+

+ Confirm & Submit +

+

Below are your fine-tuning order details. If you have any questions before or after ordering your fine-tune you can checkout the fine-tuning FAQ or email team@mintplexlabs.com.

-
-
-

Contact e-mail:

-

{settings.email}

-
-
-

Base LLM:

-

{settings.baseModel}

-
-
-

Output model name:

-

"{settings.modelName}"

-
-
-
-

Training on workspaces:

- {settings.trainingData.slugs.map((slug, i) => { - return ( -

- "{slug}" - {i !== settings.trainingData.slugs.length - 1 ? "," : ""} -

- ); - })} +
+
+
+

Contact e-mail:

+

{settings.email}

- {settings.trainingData.feedback === true ? ( -

- training on positive-feedback chats only. +

+

Base LLM:

+

{settings.baseModel}

+
+
+

Output model name:

+

"{settings.modelName}"

+
+
+

Training on workspaces:

+
+ {settings.trainingData.slugs.map((slug, i) => ( + + {getWorkspaceName(slug)} + + ))} +
+
+
+

Training data:

+

+ {settings.trainingData.feedback === true + ? "Training on positive-feedback chats only" + : "Training on all chats"}

- ) : ( -

- training on all chats. -

- )} +
-
-
- -

Agreed to Terms and Conditions

-
-
- -

Understand privacy & data handling

-
-
- -

Agreed to Fulfillment terms

+
+
    +
  • + +

    + Agreed to Terms and Conditions +

    +
  • +
  • + +

    + Understand privacy & data handling +

    +
  • +
  • + +

    Agreed to Fulfillment terms

    +
  • +
-
-
+
+

Total one-time cost:

-

+

{dollarFormat(settings.tuningInfo.pricing.usd)} *

-

+

* price does not include any coupons, incentives, or discounts you can apply at checkout.

-

+

Once you proceed to checkout, if you do not complete this purchase your data will be deleted from our servers within 1 hour of abandoning the checkout, in accordance with our privacy and data handling policy.

+ + {loading ? "Generating order..." : "Start Training →"} +
- -
); diff --git a/frontend/src/pages/FineTuning/Steps/DataUpload/index.jsx b/frontend/src/pages/FineTuning/Steps/DataUpload/index.jsx index d4cec7ea..6a48d591 100644 --- a/frontend/src/pages/FineTuning/Steps/DataUpload/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/DataUpload/index.jsx @@ -1,8 +1,14 @@ import { useEffect, useState } from "react"; import FineTuning from "@/models/experimental/fineTuning"; import Workspace from "@/models/workspace"; -import { CheckCircle, Warning, X } from "@phosphor-icons/react"; +import { + CheckCircle, + Warning, + X, + MagnifyingGlass, +} from "@phosphor-icons/react"; import FineTuningSteps from ".."; +import CTAButton from "@/components/lib/CTAButton"; export default function DataUpload({ setSettings, setStep }) { const [workspaces, setWorkspaces] = useState([]); @@ -41,34 +47,29 @@ export default function DataUpload({ setSettings, setStep }) { return (
-
-
-
-

- Select your training dataset. -

-

- This is the data your model will be trained and tuned on. This is - a critical step and you should always train on the exact - information you want the model to inherit. By default, AnythingLLM - will use all chats, but you can filter chats by workspace and even - limit training to chats which users have left a positive feedback - indication on (thumbs up). -

+
+
+

+ Select your training dataset +

+

+ This is the data your model will be trained and tuned on. This is a + critical step and you should always train on the exact information + you want the model to inherit. By default, AnythingLLM will use all + chats, but you can filter chats by workspace and even limit training + to chats which users have left a positive feedback indication on + (thumbs up). +

-
-
- -

- Enabling this toggle will filter your dataset to only use - "positive" responses that were marked during chatting. -

-
+ +
+ +

+ Enabling this toggle will filter your dataset to only use + "positive" responses that were marked during chatting. +

-
-
- -

- You training data will be limited to these workspaces. -

-
+
+ +

+ Your training data will be limited to these workspaces. +

+ -
- - + + Proceed to Confirmation → + + +
); @@ -155,60 +155,59 @@ function WorkspaceSelector({ } return ( -
-
-
-
-
- {selectedWorkspaces.map((workspace) => { - return ( -
-
- {workspace.name} -
-
+
+
+ + setQuery(e.target.value)} + onFocus={() => setShowSuggestions(true)} + onBlur={() => + setTimeout(() => { + setShowSuggestions(false); + }, 500) + } + placeholder="Enter a workspace name" + className="bg-transparent p-1 px-2 appearance-none outline-none h-full w-full text-white text-xs placeholder:`text-white/50`" + /> +
+
+
+
+
+
+ {selectedWorkspaces.map((workspace) => { + return ( +
+ {workspace.name}
-
- ); - })} -
- setQuery(e.target.value)} - onFocus={() => setShowSuggestions(true)} - onBlur={() => - setTimeout(() => { - setShowSuggestions(false); - }, 500) - } - placeholder="Enter a workspace name" - className="w-[200px] bg-transparent p-1 px-2 appearance-none outline-none h-full w-full text-white" - /> + ); + })}
-
- {showSuggestions && ( -
-
- + {showSuggestions && ( +
+
+ +
-
- )} + )} +
); @@ -221,7 +220,7 @@ function WorkspaceSuggestions({ }) { if (availableWorkspaces.length === 0) { return ( -
+

no workspaces available to select.

@@ -239,7 +238,7 @@ function WorkspaceSuggestions({ : availableWorkspaces; return ( -
+
{filteredWorkspace.map((workspace) => { return (
); diff --git a/frontend/src/pages/FineTuning/Steps/Introduction/index.jsx b/frontend/src/pages/FineTuning/Steps/Introduction/index.jsx index 1b784d96..c60e12d6 100644 --- a/frontend/src/pages/FineTuning/Steps/Introduction/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/Introduction/index.jsx @@ -1,5 +1,6 @@ -import { CheckCircle, XCircle } from "@phosphor-icons/react"; +import { Check, X } from "@phosphor-icons/react"; import FineTuningSteps from ".."; +import CTAButton from "@/components/lib/CTAButton"; export default function Introduction({ setSettings, setStep }) { const handleAccept = () => { @@ -11,12 +12,12 @@ export default function Introduction({ setSettings, setStep }) { return (
-
-
-

+
+
+

What is a "Fine-Tuned" model?

-
+

Fine-tuned models are basically "customized" Language-Learning-Models (LLMs). These can be based on popular @@ -36,8 +37,8 @@ export default function Introduction({ setSettings, setStep }) {

-
-

+
+

When should I get a fine-tuned model?

@@ -45,48 +46,49 @@ export default function Introduction({ setSettings, setStep }) { following

    -
  • - Setting the style, - tone, format, or other qualitative aspects without prompting +
  • + Setting + the style, tone, format, or other qualitative aspects without + prompting
  • -
  • - Improving reliability - at producing a desired output +
  • + {" "} + Improving reliability at producing a desired output
  • -
  • - Correcting failures - to follow complex prompts, citations, or lack of background - knowledge +
  • + {" "} + Correcting failures to follow complex prompts, citations, or + lack of background knowledge
  • -
  • - You want to run this - model privately or offline +
  • + You + want to run this model privately or offline
-
-

+
+

What are fine-tunes bad for?

- Fine-tuned models powerful, but they are not the "silver bullet" - to any issues you have with RAG currently. Some notable + Fine-tuned models are powerful, but they are not the "silver + bullet" to any issues you have with RAG currently. Some notable limitations are

  • - You need perfect recall of - some piece of literature or reference document + You need + perfect recall of some piece of literature or reference document
  • - You want your model to have - perfect memory or recollection + You want + your model to have perfect memory or recollection
-
+

In summary, if you are getting good results with RAG currently, creating a fine-tune can squeeze even more performance out @@ -95,15 +97,14 @@ export default function Introduction({ setSettings, setStep }) { that is what RAG is for! Together, it is a powerful combination.

+ + Create a fine-tune model → +
- -

); diff --git a/frontend/src/pages/FineTuning/Steps/OrderDetails/index.jsx b/frontend/src/pages/FineTuning/Steps/OrderDetails/index.jsx index b0f44a46..657c75ac 100644 --- a/frontend/src/pages/FineTuning/Steps/OrderDetails/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/OrderDetails/index.jsx @@ -2,9 +2,11 @@ import FineTuning from "@/models/experimental/fineTuning"; import { useEffect, useState } from "react"; import FineTuningSteps from ".."; import { CircleNotch } from "@phosphor-icons/react"; +import CTAButton from "@/components/lib/CTAButton"; export default function OrderDetails({ setSettings, setStep }) { const [info, setInfo] = useState({}); + useEffect(() => { FineTuning.info() .then((res) => { @@ -32,33 +34,30 @@ export default function OrderDetails({ setSettings, setStep }) { return (
-
-
-
-

- Time to create your fine tune! -

-

- Creating a model is quite simple. Currently we have a limited base - model selection, however in the future we plan to expand support - to many more foundational models. -

- -
-
- -

- This e-mail is where you will receive all order information - and updates. This e-mail must be accurate or else we - won't be able to contact you with your fine-tuned model! -

-
+
+
+

+ Time to create your fine tune! +

+

+ Creating a model is quite simple. Currently we have a limited base + model selection, however in the future we plan to expand support to + many more foundational models. +

+ +
+ +

+ This e-mail is where you will receive all order information and + updates. This e-mail must be accurate or else we won't be + able to contact you with your fine-tuned model! +

-
-
- -

- This is the foundational model your fine-tune will be based - on. We recommend Llama 3 8B. -

-
+
+ +

+ This is the foundational model your fine-tune will be based on. + We recommend Llama 3 8B. +

{info.hasOwnProperty("availableBaseModels") ? ( ) : ( @@ -103,35 +98,34 @@ export default function OrderDetails({ setSettings, setStep }) { )}
-
-
- -

- What would you like to call your model? This has no impact on - its output or training and is only used for how we communicate - with you about the model. -

-
+
+ +

+ What would you like to call your model? This has no impact on + its output or training and is only used for how we communicate + with you about the model. +

-
- - + + + Proceed to data selection → + + +
); diff --git a/frontend/src/pages/FineTuning/Steps/OrderPlaced/index.jsx b/frontend/src/pages/FineTuning/Steps/OrderPlaced/index.jsx index 018b8fba..8d0b1f01 100644 --- a/frontend/src/pages/FineTuning/Steps/OrderPlaced/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/OrderPlaced/index.jsx @@ -1,13 +1,15 @@ +import CTAButton from "@/components/lib/CTAButton"; +import paths from "@/utils/paths"; + export default function OrderPlaced({ settings }) { return (
-
-
-

+
+
+

Your order is placed!

- -
+

Your fine-tune will begin once payment is complete. If the payment window did not automatically open - your checkout link is below. @@ -15,53 +17,65 @@ export default function OrderPlaced({ settings }) { {new URL(settings.checkoutUrl).origin} -

+

Your fine-tune does not begin until this payment is completed.

-
-
-

- Reference: {settings.jobId} -

-

- This reference id is how we will communicate with you about your - fine-tune training status. Save this reference id. +

+

+ Reference: {settings.jobId} +

+

+ This reference id is how we will communicate with you about your + fine-tune training status. Save this reference id. +

+
+ +
+

+ Contact: {settings.email} +

+

+ Check the email above for order confirmation, status updates, + and more. Mintplex Labs will only contact you about your order + via email. +

+
+ + + +

+ You can close this window or navigate away once you see the + confirmation email in your inbox.

-
-

- Contact: {settings.email} -

-

- Check the email above for order confirmation, status updates, and - more. Mintplex Labs will only contact you about your order via - email. -

-
- - - -

- You can close this window or navigate away once you see the - confirmation email in your inbox. -

+ (window.location.href = paths.home())} + > + Finish +
diff --git a/frontend/src/pages/FineTuning/Steps/Privacy/index.jsx b/frontend/src/pages/FineTuning/Steps/Privacy/index.jsx index 6e0d5e98..e4d71151 100644 --- a/frontend/src/pages/FineTuning/Steps/Privacy/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/Privacy/index.jsx @@ -1,3 +1,4 @@ +import CTAButton from "@/components/lib/CTAButton"; import FineTuningSteps from ".."; export default function PrivacyHandling({ setSettings, setStep }) { @@ -10,12 +11,12 @@ export default function PrivacyHandling({ setSettings, setStep }) { return (
-
-
-

+
+
+

Data Handling Policy & Privacy

-

+

Please accept the terms and conditions to continue with creation and ordering of a fine-tune model. We take the handling of your data very seriously and will only use your uploaded data for training the @@ -23,18 +24,14 @@ export default function PrivacyHandling({ setSettings, setStep }) { completed, or canceled your information is automatically and permanently deleted.

-
-

Privacy Policy

- -

- Mintplex Labs Inc. -

-

Effective Date: July 15, 2024

- -

- 1. Introduction -

-

+

+
+

Privacy Policy

+

Mintplex Labs Inc.

+

Effective Date: July 15, 2024

+
+

1. Introduction

+

Welcome to Mintplex Labs Inc. ("we", "our", "us"). We are committed to protecting your privacy and ensuring the security of your personal information. This Privacy Policy describes how we @@ -42,45 +39,41 @@ export default function PrivacyHandling({ setSettings, setStep }) { services.

-

- 2. Information We Collect -

-

+

2. Information We Collect

+

When you place an order with us for tuning and large language model (LLM) fulfillment, we collect certain personal information from you, including but not limited to:

-
    +
    • Email address
    • Payment information
    • Uploaded training data
    -

    - 3. Use of Information -

    -

    We use the information we collect for the following purposes:

    -
      +

      3. Use of Information

      +

      + We use the information we collect for the following purposes: +

      +
      • To process and fulfill your order
      • To communicate with you regarding your order
      • To improve our services
      -

      - 4. Data Retention and Deletion -

      -

      +

      4. Data Retention and Deletion

      +

      Uploaded training data is only retained for the duration of the model training. Upon training completion, failure, or order cancellation, the user data is permanently deleted from our storage.

      -

      +

If you partially complete the order flow and do not finalize your order, any details and information associated with your order will be deleted within 1 hour of abandonment.

      -

      +

      After you confirm receipt of your resulting model files, you can request us to delete your model from our storage at any time. Additionally, we may proactively reach out to you to confirm that @@ -90,10 +83,8 @@ export default function PrivacyHandling({ setSettings, setStep }) { storage.

      -

      - 5. Data Storage and Security -

      -

      +

      5. Data Storage and Security

      +

Our cloud storage provider is AWS. We have implemented standard encryption and protection policies to ensure the security of your data. The storage solution has no public access, and all requests are signed and expire within minutes of creation. Model files are stored in a dedicated bucket location and are only downloadable via temporary signed URLs. Temporary signed URLs expire after 24 hours. We retain these encrypted model files to fulfill any future support requests but can fully delete these files upon request via the e-mail you used during checkout.

      -

      - 6. Payment Processing -

      -

      +

      6. Payment Processing

      +

      We use Stripe as our payment processor. Your email may be shared with Stripe for customer service and payment management purposes.

      -

      - 7. Data Sharing -

      -

      +

      7. Data Sharing

      +

      We do not sell or share your personal information with third parties except as necessary to provide our services, comply with legal obligations, or protect our rights.

      -

      - 8. Your Rights -

      -

      +

      8. Your Rights

      +

      You have the right to access, correct, or delete your personal information. If you wish to exercise these rights, please contact us at{" "} - team@mintplexlabs.com. + + team@mintplexlabs.com + + .

      -

      - 9. California Privacy Rights -

      -

      +

      9. California Privacy Rights

      +

      Under the California Consumer Privacy Act as amended by the - California Privacy Rights Act (the “CCPA”), California residents + California Privacy Rights Act (the "CCPA"), California residents have additional rights beyond what is set out in this privacy notice:

      -
        +
        • Right to Know: You have the right to request information about the categories and specific pieces of personal @@ -170,63 +159,70 @@ export default function PrivacyHandling({ setSettings, setStep }) { your CCPA rights.
        -

        +

        Submitting a Request:
        You may submit a request to know, delete, or correct your personal information by contacting us at{" "} - team@mintplexlabs.com. - We will confirm your identity before processing your request and + + team@mintplexlabs.com + + . We will confirm your identity before processing your request and respond within 45 days. If more time is needed, we will inform you of the reason and extension period in writing. You may make a request for your information twice every 12 months. If you are making an erasure request, please include details of the information you would like erased.

        -

        +

        Please note that if you request that we remove your information, we may retain some of the information for specific reasons, such as to resolve disputes, troubleshoot problems, and as required by law. Some information may not be completely removed from our databases due to technical constraints and regular backups.

        -

        +

        We will not discriminate against you for exercising any of your CCPA rights.

        -

        - 10. Contact Us -

        -

        +

        10. Contact Us

        +

        If you have any questions or concerns about this Privacy Policy, please contact us at{" "} - team@mintplexlabs.com. + + team@mintplexlabs.com + + .

        -

        +

        11. Changes to This Privacy Policy

        -

        +

        We may update this Privacy Policy from time to time. We will notify you of any changes by posting the new Privacy Policy on our website. You are advised to review this Privacy Policy periodically for any changes.

        -

        +

        By using our services, you agree to the terms of this Privacy Policy.

+ + Agree and continue → +
- -
); diff --git a/frontend/src/pages/FineTuning/Steps/TermsAndConditions/index.jsx b/frontend/src/pages/FineTuning/Steps/TermsAndConditions/index.jsx index 339c43de..c29e500d 100644 --- a/frontend/src/pages/FineTuning/Steps/TermsAndConditions/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/TermsAndConditions/index.jsx @@ -1,3 +1,4 @@ +import CTAButton from "@/components/lib/CTAButton"; import FineTuningSteps from ".."; export default function TermsAndConditions({ setSettings, setStep }) { @@ -10,24 +11,21 @@ export default function TermsAndConditions({ setSettings, setStep }) { return (
-
-
-

+
+
+

Terms and Conditions

-

+

Please accept the terms and conditions to continue with creation and ordering of a fine-tune model.

-
-

- Mintplex Labs Inc. Fine-Tuning Terms of Service -

-

- Last Updated: July 15, 2024 -

- -

+

+
+

Mintplex Labs Inc. Fine-Tuning Terms of Service

+

Last Updated: July 15, 2024

+
+

This Agreement is between Mintplex Labs Inc. ("Company") and the customer ("Customer") accessing or using the services provided by the Company. By signing up, accessing, or using the services, @@ -35,20 +33,16 @@ export default function TermsAndConditions({ setSettings, setStep }) { be bound by the terms and conditions outlined below.

-

- 1. Services Provided -

-

+

1. Services Provided

+

Mintplex Labs Inc. provides model fine-tuning services for customers. The deliverable for these services is a download link to the output ".GGUF" file that can be used by the Customer for large language model (LLM) text inferencing.

-

- 2. Payment Terms -

-
    +

    2. Payment Terms

    +
    • One-Time Payment: A one-time payment is required before the execution of the training. @@ -64,10 +58,8 @@ export default function TermsAndConditions({ setSettings, setStep }) {
    -

    - 3. Order Form -

    -
      +

      3. Order Form

      +
      • Service: Model fine-tuning
      • @@ -79,88 +71,81 @@ export default function TermsAndConditions({ setSettings, setStep }) {
      -

      - 4. Customer Responsibilities -

      -

      +

      4. Customer Responsibilities

      +

      The Customer must provide all necessary data and information required for model fine-tuning.

      -

      +

      The Customer must ensure timely payment as per the terms mentioned above.

      -

      +

The Customer understands the data collected for tuning will be stored in a private cloud storage location temporarily while training is in progress.

      -

      +

      The Customer understands the data collected for tuning will be fully deleted once the order is completed or canceled by the Company.

      -

      +

      The Customer understands and has reviewed the Privacy Policy for Fine-Tuning by the Company.

      -

      - 5. Refund Policy -

      -

      +

      5. Refund Policy

      +

      Refunds will be processed in the event of training failure or if the complete model file is not delivered to the Customer. Refunds will be issued to the original payment method within 30 days of the refund request.

      -

      - 6. Governing Law -

      -

      +

      6. Governing Law

      +

      This Agreement shall be governed by and construed in accordance with the laws of the State of California.

      -

      - 7. Dispute Resolution -

      -

      +

      7. Dispute Resolution

      +

      Any disputes arising out of or in connection with this Agreement shall be resolved in the state or federal courts located in California.

      -

      - 8. Notices -

      -

      +

      8. Notices

      +

      All notices under this Agreement shall be in writing and shall be deemed given when delivered personally, sent by confirmed email, or sent by certified or registered mail, return receipt requested, and addressed to the respective parties as follows:

      -

      +

      For Company:{" "} - team@mintplexlabs.com + + team@mintplexlabs.com + +

      +

      + For Customer: The main email address on Customer's account

      -

      For Customer: The main email address on Customer's account

      -

      - 9. Amendments -

      -

      +

      9. Amendments

      +

      The Company reserves the right to amend these terms at any time by providing notice to the Customer. The Customer's continued use of the services after such amendments will constitute acceptance of the amended terms.

      -

      - 10. Indemnity -

      -

      +

      10. Indemnity

      +

      The Customer agrees to indemnify, defend, and hold harmless Mintplex Labs Inc., its affiliates, and their respective officers, directors, employees, agents, and representatives from and against @@ -173,15 +158,13 @@ export default function TermsAndConditions({ setSettings, setStep }) { person or entity.

+ + Agree and continue → +
- -
); diff --git a/frontend/src/pages/FineTuning/Steps/index.jsx b/frontend/src/pages/FineTuning/Steps/index.jsx index 55ed589d..7cf74263 100644 --- a/frontend/src/pages/FineTuning/Steps/index.jsx +++ b/frontend/src/pages/FineTuning/Steps/index.jsx @@ -26,7 +26,7 @@ import OrderPlaced from "./OrderPlaced"; const FineTuningSteps = { intro: { - name: "Introduction to Fine-Tuning", + name: "1. Introduction to Fine-Tuning", next: () => "privacy", component: ({ settings, setSettings, setStep }) => ( "tos", component: ({ settings, setSettings, setStep }) => ( "fulfillment", component: ({ settings, setSettings, setStep }) => ( "order-details", component: ({ settings, setSettings, setStep }) => ( "data-selection", component: ({ settings, setSettings, setStep }) => ( "confirmation", component: ({ settings, setSettings, setStep }) => ( "done", component: ({ settings, setSettings, setStep }) => ( "done", component: ({ settings }) => , }, @@ -133,7 +133,7 @@ export function FineTuningCreationLayout({ setStep, children }) { return (
@@ -21,28 +21,32 @@ function SideBarSelection({ setStep, currentStep }) {
- {isDone ? ( + {isDone || isSelected ? ( ) : ( -
{props.name}
+
+ {props.name} +
)}
{isDone ? ( - +
+
+
) : ( - )} @@ -63,15 +67,19 @@ export default function FineTuningFlow() { return ( {(settings, setSettings, setStep) => ( -
-
+
+

Custom Fine-Tuned Model

- {StepPage.component({ settings, setSettings, setStep })} +
+
+ {StepPage.component({ settings, setSettings, setStep })} +
+
)}