"""The UI file for the SynthGenAI package.""" import os import asyncio import gradio as gr from synthgenai import DatasetConfig, DatasetGeneratorConfig, LLMConfig, InstructionDatasetGenerator, PreferenceDatasetGenerator,RawDatasetGenerator,SentimentAnalysisDatasetGenerator, SummarizationDatasetGenerator, TextClassificationDatasetGenerator def generate_synthetic_dataset( llm_model, temperature, top_p, max_tokens, dataset_type, topic, domains, language, additional_description, num_entries, hf_token, hf_repo_name, llm_env_vars, ): """ Generate a dataset based on the provided parameters. Args: llm_model (str): The LLM model to use. temperature (float): The temperature for the LLM. top_p (float): The top_p value for the LLM. max_tokens (int): The maximum number of tokens for the LLM. dataset_type (str): The type of dataset to generate. topic (str): The topic of the dataset. domains (str): The domains for the dataset. language (str): The language of the dataset. additional_description (str): Additional description for the dataset. num_entries (int): The number of entries in the dataset. hf_token (str): The Hugging Face token. hf_repo_name (str): The Hugging Face repository name. llm_env_vars (str): Comma-separated environment variables for the LLM. Returns: str: A message indicating the result of the dataset generation. """ os.environ["HF_TOKEN"] = hf_token for var in llm_env_vars.split(","): key, value = var.split("=") os.environ[key.strip()] = value.strip() llm_config = LLMConfig( model=llm_model, temperature=temperature, top_p=top_p, max_tokens=max_tokens, ) dataset_config = DatasetConfig( topic=topic, domains=domains.split(","), language=language, additional_description=additional_description, num_entries=num_entries, ) dataset_generator_config = DatasetGeneratorConfig( llm_config=llm_config, dataset_config=dataset_config, ) if dataset_type == "Raw": generator = RawDatasetGenerator(dataset_generator_config) elif dataset_type == "Instruction": generator = InstructionDatasetGenerator(dataset_generator_config) elif dataset_type == "Preference": generator = PreferenceDatasetGenerator(dataset_generator_config) elif dataset_type == "Sentiment Analysis": generator = SentimentAnalysisDatasetGenerator(dataset_generator_config) elif dataset_type == "Summarization": generator = SummarizationDatasetGenerator(dataset_generator_config) elif dataset_type == "Text Classification": generator = TextClassificationDatasetGenerator(dataset_generator_config) else: return "Invalid dataset type" async def generate(): dataset = await generator.agenerate_dataset() dataset.save_dataset(hf_repo_name=hf_repo_name) return "Dataset generated and saved successfully." try: return asyncio.run(generate()) except RuntimeError as e: if str(e) == "Event loop is closed": loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) return loop.run_until_complete(generate()) else: raise def ui_main(): """ Launch the Gradio UI for the SynthGenAI dataset generator. """ with gr.Blocks( title="SynthGenAI Dataset Generator", css=""" .gradio-container .gr-block { margin-bottom: 10px; margin-left: 5px; margin-right: 5px; text-align: center; } """, theme="ParityError/Interstellar", ) as demo: gr.HTML( """
            <p>
                SynthGenAI is designed to be modular and can easily be extended to support
                additional LLM API providers and new features.
            </p>
            <p>
                Interest in synthetic data generation has surged recently, driven by the growing
                recognition of data as a critical asset in AI development. Synthetic data
                generation helps meet this need by allowing us to create diverse and useful
                datasets with current pre-trained Large Language Models (LLMs).
            </p>
            <p>
                For more information on which LLMs are supported and how they can be used,
                please refer to the documentation.
            </p>
            <p>
                GitHub Repository | Documentation
            </p>