import React, { useState, useEffect } from 'react'; import { Shield, Lock, Eye, CheckCircle, AlertTriangle, RefreshCcw, Award, ChevronRight, Info } from 'lucide-react'; const App = () => { const [stage, setStage] = useState('intro'); // intro, training, quiz, result, certificate const [currentQuestion, setCurrentQuestion] = useState(0); const [score, setScore] = useState(0); const [answers, setAnswers] = useState([]); const [quizFinished, setQuizFinished] = useState(false); // Training content points const trainingPoints = [ { title: "Protect Your Credentials", icon: , content: "Never hard-code API keys in your scripts or upload them to version control (GitHub). Use environment variables and .gitignore files to keep secrets private." }, { title: "Data Anonymization", icon: , content: "Before sending data to a cloud-based AI, remove all Personally Identifiable Information (PII) like names, emails, or IDs unless you are using an enterprise-grade, private instance." }, { title: "The Hallucination Factor", icon: , content: "AI can confidently generate incorrect code or false statistical conclusions. Always verify the logic of AI-generated snippets and cross-check data insights." }, { title: "Library Hijacking", icon: , content: "AI might suggest non-existent or malicious Python/R packages. Always verify that a library is legitimate and widely used before installing it in your environment." }, { title: "Bias Awareness", icon: , content: "AI models often mirror biases present in their training data. Critically evaluate if your analysis is reinforcing stereotypes or ignoring specific demographic nuances." 
} ]; const questions = [ { question: "Where is the safest place to store an AI API key for a Python project?", options: [ "Inside a comment at the top of the main script", "In a .env file that is excluded via .gitignore", "In a public GitHub repository for easy access by the team", "Directly in the variable definition: apiKey = 'sk-123...'" ], correct: 1 }, { question: "You need to analyze a customer dataset using a public LLM. What should you do first?", options: [ "Upload the CSV immediately to get fast results", "Ask the AI if it is safe to upload data", "Strip all PII (names, phone numbers, addresses) from the dataset", "Zip the file and password protect it before uploading" ], correct: 2 }, { question: "The AI suggests using a library called 'pandas-ultra-secure' for data cleaning. You've never heard of it. What is the best practice?", options: [ "Install it immediately as AI knows the latest tools", "Check PyPI/NPM for the package's reputation and source code", "Trust the AI's recommendation but run it as an admin", "Only use it if the AI provides a code example" ], correct: 1 }, { question: "What does 'Human-in-the-Loop' mean in AI data analysis?", options: [ "Hiring someone to type what the AI says", "A human must review and validate AI outputs before they are used for decision making", "The AI is part of a human network", "Training the AI on human skeletons" ], correct: 1 }, { question: "An AI provides a complex SQL query for your database. It runs without errors. 
Should you trust the results?", options: [ "Yes, if it runs, the logic is definitely correct", "No, you must verify the logic ensures the query actually answers the intended question", "Yes, AI is better at SQL than humans", "Only if the execution time was fast" ], correct: 1 }, { question: "Which of these is a 'Hallucination' in data analysis?", options: [ "The AI becoming self-aware", "The AI generating a visualization that looks like a ghost", "The AI inventing a statistical trend or fact that doesn't exist in the data", "The computer monitor flickering" ], correct: 2 }, { question: "When using AI for automated reporting, why is data bias a concern?", options: [ "It makes the reports look unprofessional", "AI might ignore or misinterpret data from minority groups if the training data was skewed", "Bias causes the AI to use more electricity", "Bias only matters in social media, not data science" ], correct: 1 }, { question: "If you accidentally push an API key to a public repo, what is the correct response?", options: [ "Delete the file and commit again", "Ignore it; nobody will find it among millions of repos", "Immediately revoke (delete) the key on the provider's dashboard and generate a new one", "Edit the commit history to hide the key" ], correct: 2 }, { question: "What is the primary risk of using 'Prompt Injection' on your own data analysis scripts?", options: [ "It makes the AI run faster", "Unauthorized users could manipulate the AI to reveal sensitive system info or bypass filters", "It helps the AI understand the data better", "There is no risk to internal scripts" ], correct: 1 }, { question: "According to safety guidelines, AI is best used as a:", options: [ "Replacement for junior data analysts", "Co-pilot or assistant that requires expert supervision", "Source of absolute truth for all business metrics", "Fully autonomous system for financial reporting" ], correct: 1 } ]; const handleAnswer = (index) => { const newAnswers = [...answers]; 
newAnswers[currentQuestion] = index; setAnswers(newAnswers); }; const nextQuestion = () => { if (currentQuestion < questions.length - 1) { setCurrentQuestion(currentQuestion + 1); } else { finishQuiz(); } }; const finishQuiz = () => { let finalScore = 0; answers.forEach((ans, idx) => { if (ans === questions[idx].correct) finalScore++; }); setScore(finalScore); setStage('result'); }; const restart = () => { setStage('intro'); setCurrentQuestion(0); setAnswers([]); setScore(0); }; const proceedToQuiz = () => setStage('quiz'); const startTraining = () => setStage('training'); const renderIntro = () => (

AI Data Analysis Safety

Welcome to the team safety certification. Learn the essentials of developing with AI and test your knowledge.

); const renderTraining = () => (

Core Safety Principles

Review these points carefully before the quiz.

{trainingPoints.map((point, i) => (
{point.icon}

{point.title}

{point.content}

))}
); const renderQuiz = () => { const q = questions[currentQuestion]; const progress = ((currentQuestion + 1) / questions.length) * 100; return (
{/* Quiz screen: header with question counter and a live score recomputed from
    `answers`, then the question text and the option list. */}
Question {currentQuestion + 1} of {questions.length} Score: {answers.filter((a, i) => a === questions[i].correct).length}

{q.question}

{/* NOTE(review): the option elements were stripped — the map body is empty.
    Presumably each option rendered a button wired to handleAnswer(i) and
    nextQuestion(); restore from the original markup. */}
{q.options.map((opt, i) => ( ))}
{/* NOTE(review): this collapsed line is self-broken — the inline
    "// Max 2 wrong = 8/10" comment swallows `const perfect = ...` and
    everything after it on the same physical line. Re-split before use. */}
); }; const renderResult = () => { const passed = score >= 8; // Max 2 wrong = 8/10 const perfect = score === 10; return (

Quiz Complete

Final Score: {score} / 10
{perfect ? (

Perfect Score!

You are officially a Safety Champion. You've demonstrated full mastery of AI data analysis security.

Official Certification

AI Safety Certified

Verified Expert Level

) : passed ? (

Well Done!

You passed the training. You missed {10 - score} question(s), but you have a strong grasp of safe AI practices.

) : (

Not Quite There

You missed {10 - score} questions. For team security compliance, you must get at least 8/10 to pass. Please review and try again.

)}

Review Your Answers:

{questions.map((q, i) => (

{i + 1}. {q.question}

Your answer: {q.options[answers[i]]}

{answers[i] !== q.correct && (

Correct answer: {q.options[q.correct]}

)}
))}
{passed && ( )}
); }; return (
{/* Header Decor */}
{/* Stage dispatch: exactly one renderer matches the current `stage` value.
    NOTE(review): wrapper JSX elements were stripped during export. */}
{stage === 'intro' && renderIntro()} {stage === 'training' && renderTraining()} {stage === 'quiz' && renderQuiz()} {stage === 'result' && renderResult()}
{/* Footer info */}

AI Safety Internal Training Module v1.0