parent 4315bf1ab3
commit 7cea1003de
@@ -0,0 +1,109 @@
import os
import re
import json

from flask import Flask, request, jsonify

import google.generativeai as genai

# Configure the Flask app
app = Flask(__name__)

# Configure the Gemini API (read the key from the environment instead of hardcoding it in source)
GENAI_API_KEY = os.environ.get("GENAI_API_KEY")
genai.configure(api_key=GENAI_API_KEY)

@app.route('/test-spam', methods=['POST'])
def test_spam():
    try:
        # Extract the test message from the POST request body
        data = request.get_json()
        if not data or "test_message" not in data:
            return jsonify({"error": "test_message is required in the request body"}), 400

        test_message = data["test_message"]

        # Construct a prompt for the Gemini API
        prompt = (
            "Please act as a senior spam detector. Based on your knowledge and experience, "
            f"evaluate the following message for spam: \"{test_message}\". "
            "Provide a spam score from 1 to 10, where 1 means not spam and 10 means highly suspicious. "
            "Only return the spam score as a number."
        )

        # Use the Gemini API to generate content
        model = genai.GenerativeModel("gemini-1.5-flash")
        response = model.generate_content(prompt)

        # Extract the spam score from the response
        if response and response.candidates:
            spam_score_str = response.candidates[0].content.parts[0].text.strip()
        else:
            return jsonify({"error": "Invalid response structure from Gemini API"}), 500

        return jsonify({"spam_score": spam_score_str}), 200

    except Exception as e:
        return jsonify({"error": str(e)}), 500
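
# A quick way to exercise the endpoint above (illustrative sketch; the host/port assume
# the default Flask development server and the message text is only an example):
#
#   curl -X POST http://127.0.0.1:5000/test-spam \
#        -H "Content-Type: application/json" \
#        -d '{"test_message": "Congratulations, you won a free prize! Click here now."}'
#
# The response has the shape {"spam_score": "<number returned by the model>"}.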

def parse_questions(response_text):
    # Extract JSON content from the response_text; the model may wrap it in a ```json fence
    # even though the prompt asks for plain JSON, so fall back to the raw text when no fence is found
    match = re.search(r"```json\n(.*?)\n```", response_text, re.DOTALL)
    json_content = match.group(1) if match else response_text

    # Parse the JSON content
    questions_data = json.loads(json_content)

    # Extract and format questions
    parsed_questions = []
    for item in questions_data["questions"]:
        question = {
            "question": item["question"],
            "options": item["options"],
            "correct_answer": item["correct_answer"]
        }
        parsed_questions.append(question)

    return parsed_questions
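
# For reference, a minimal example of what parse_questions accepts (values are made up):
#   parse_questions('```json\n{"questions": [{"question": "2 + 2?", "options": ["3", "4", "5", "6"], "correct_answer": "4"}]}\n```')
# returns
#   [{"question": "2 + 2?", "options": ["3", "4", "5", "6"], "correct_answer": "4"}]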

@app.route('/generate-questions', methods=['POST'])
def generate_questions():
    try:
        # Extract the topic (string_message) from the POST request body
        data = request.get_json()
        if not data or "string_message" not in data:
            return jsonify({"error": "string_message is required in the request body"}), 400

        string_message = data["string_message"]

        # Construct a prompt to generate questions
        prompt = (
            f"Act as a well-qualified person on the topic: {string_message}. "
            "Help me generate 10 different questions with 4 options for each question. "
            "One of the options should be correct, and it should be clear which option is correct. "
            "The response should be plain JSON, nothing more, with the given schema: "
            '{"questions": [{"question": "What is the capital of France?", "options": ["Paris", "London", "Berlin", "Madrid"], "correct_answer": "Paris"}, ...]} '
            "Please provide the questions in the format specified above. "
            "Generate the questions based on the topic provided."
        )

        # Use the Gemini API to generate content
        model = genai.GenerativeModel("gemini-1.5-flash")
        response = model.generate_content(prompt)

        # Extract the response text
        if response and response.candidates:
            response_text = response.candidates[0].content.parts[0].text.strip()
        else:
            return jsonify({"error": "Invalid response structure from Gemini API"}), 500

        # Parse the questions from the response text
        questions = parse_questions(response_text)

        # Return the questions in the desired format
        return jsonify({"questions": questions}), 200

    except Exception as e:
        return jsonify({"error": str(e)}), 500
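
# A quick way to exercise the endpoint above (illustrative sketch; host/port assume the
# default Flask development server and the topic is only an example):
#
#   curl -X POST http://127.0.0.1:5000/generate-questions \
#        -H "Content-Type: application/json" \
#        -d '{"string_message": "World geography"}'
#
# On success the response is {"questions": [...]}, with each entry holding "question",
# "options", and "correct_answer" as produced by parse_questions.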

if __name__ == '__main__':
    app.run(debug=True)