Reubencf committed
Commit dbc2c2a · verified · 1 Parent(s): b371d48

Upload 42 files
.gitattributes CHANGED
@@ -34,3 +34,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 public/sukajan.png filter=lfs diff=lfs merge=lfs -text
+public/clothes/suit.png filter=lfs diff=lfs merge=lfs -text
+public/clothes/sukajan.png filter=lfs diff=lfs merge=lfs -text
+public/clothes/womenoutfit.png filter=lfs diff=lfs merge=lfs -text
+public/lighting/light1.png filter=lfs diff=lfs merge=lfs -text
+public/lighting/light2.png filter=lfs diff=lfs merge=lfs -text
+public/lighting/light3.png filter=lfs diff=lfs merge=lfs -text
+public/makeup/makeup1.png filter=lfs diff=lfs merge=lfs -text
+public/poses/sit1.png filter=lfs diff=lfs merge=lfs -text
+public/poses/sit2.png filter=lfs diff=lfs merge=lfs -text
+public/poses/stand1.png filter=lfs diff=lfs merge=lfs -text
+public/poses/stand2.png filter=lfs diff=lfs merge=lfs -text
+public/reo.png filter=lfs diff=lfs merge=lfs -text
app/api/auth/callback/route.ts ADDED
@@ -0,0 +1,73 @@
+import { NextRequest, NextResponse } from "next/server";
+import { cookies } from "next/headers";
+
+export async function GET(req: NextRequest) {
+  const url = new URL(req.url);
+  const code = url.searchParams.get('code');
+
+  if (code) {
+    // This is an OAuth redirect, redirect to main page for client-side handling
+    return NextResponse.redirect(new URL('/', req.url));
+  } else {
+    // This is a status check request
+    try {
+      const cookieStore = await cookies();
+      const hfToken = cookieStore.get('hf_token');
+
+      return NextResponse.json({
+        isLoggedIn: !!hfToken?.value,
+        hasToken: !!hfToken?.value
+      });
+    } catch (error) {
+      console.error('Error checking HF token:', error);
+      return NextResponse.json({ isLoggedIn: false, hasToken: false });
+    }
+  }
+}
+
+export async function POST(req: NextRequest) {
+  try {
+    const { hf_token } = await req.json();
+
+    if (!hf_token || typeof hf_token !== "string") {
+      return NextResponse.json(
+        { error: "Invalid or missing HF token" },
+        { status: 400 }
+      );
+    }
+
+    // Store the token in a secure HTTP-only cookie
+    const cookieStore = await cookies();
+    cookieStore.set({
+      name: 'hf_token',
+      value: hf_token,
+      httpOnly: true,
+      secure: process.env.NODE_ENV === 'production',
+      sameSite: 'lax',
+      maxAge: 60 * 60 * 24 * 30 // 30 days
+    });
+
+    return NextResponse.json({ success: true });
+  } catch (error) {
+    console.error('Error storing HF token:', error);
+    return NextResponse.json(
+      { error: "Failed to store token" },
+      { status: 500 }
+    );
+  }
+}
+
+export async function DELETE() {
+  try {
+    const cookieStore = await cookies();
+    cookieStore.delete('hf_token');
+
+    return NextResponse.json({ success: true });
+  } catch (error) {
+    console.error('Error deleting HF token:', error);
+    return NextResponse.json(
+      { error: "Failed to logout" },
+      { status: 500 }
+    );
+  }
+}
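For reference, a minimal client-side sketch of how this route can be exercised; the helper names below are illustrative and not part of this commit:

// Hypothetical client helpers for the auth callback route above.
async function storeHfToken(hfToken: string): Promise<boolean> {
  const res = await fetch("/api/auth/callback", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ hf_token: hfToken }), // field name matches the POST handler
  });
  return res.ok;
}

async function checkLoginStatus(): Promise<boolean> {
  const res = await fetch("/api/auth/callback"); // GET without ?code= hits the status-check branch
  const data = await res.json() as { isLoggedIn: boolean; hasToken: boolean };
  return data.isLoggedIn;
}

async function logout(): Promise<void> {
  await fetch("/api/auth/callback", { method: "DELETE" }); // clears the hf_token cookie
}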
app/api/generate/route.ts CHANGED
@@ -1,11 +1,35 @@
+/**
+ * API ROUTE: /api/generate
+ *
+ * Text-to-image generation endpoint using Google's Gemini AI model.
+ * Generates new images from natural language descriptions.
+ *
+ * Input: JSON with text prompt and optional API token
+ * Output: JSON with generated image(s) as base64 data URLs
+ *
+ * Example usage:
+ * POST /api/generate
+ * { "prompt": "A professional portrait photo of a person in business attire" }
+ */
+
 import { NextRequest, NextResponse } from "next/server";
 import { GoogleGenAI } from "@google/genai";
 
-export const runtime = "nodejs"; // Ensure Node runtime for SDK
+// Configure Next.js runtime for Node.js (required for Google AI SDK)
+export const runtime = "nodejs";
 
+/**
+ * Handle POST requests for image generation
+ *
+ * @param req NextJS request object with JSON body containing prompt and optional API token
+ * @returns JSON response with generated images or error message
+ */
 export async function POST(req: NextRequest) {
   try {
+    // Parse and validate request body
     const { prompt, apiToken } = (await req.json()) as { prompt?: string; apiToken?: string };
+
+    // Validate required prompt parameter
     if (!prompt || typeof prompt !== "string") {
       return NextResponse.json(
         { error: "Missing prompt" },
@@ -13,7 +37,7 @@ export async function POST(req: NextRequest) {
       );
     }
 
-    // Use user-provided API token or fall back to environment variable
+    // Validate and retrieve API key from user input or environment
     const apiKey = apiToken || process.env.GOOGLE_API_KEY;
     if (!apiKey || apiKey === 'your_api_key_here') {
       return NextResponse.json(
@@ -22,26 +46,34 @@
       );
     }
 
+    // Initialize Google AI client
     const ai = new GoogleGenAI({ apiKey });
 
+    // Generate image using Gemini's image generation model
     const response = await ai.models.generateContent({
-      model: "gemini-2.5-flash-image-preview",
-      contents: prompt,
+      model: "gemini-2.5-flash-image-preview", // Latest image generation model
+      contents: prompt, // Natural language description
     });
 
+    // Parse response to extract images and text
     const parts = (response as any)?.candidates?.[0]?.content?.parts ?? [];
-    const images: string[] = [];
-    const texts: string[] = [];
+    const images: string[] = []; // Array to store generated images as data URLs
+    const texts: string[] = []; // Array to store any text responses
 
+    // Process each part of the response
     for (const part of parts) {
       if (part?.inlineData?.data) {
+        // Convert base64 image data to data URL format
         images.push(`data:image/png;base64,${part.inlineData.data}`);
       } else if (part?.text) {
+        // Collect any text explanations or descriptions
         texts.push(part.text as string);
       }
     }
 
+    // Return generated content to client
     return NextResponse.json({ images, text: texts.join("\n") });
+
   } catch (err) {
     console.error("/api/generate error", err);
     return NextResponse.json(
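Based on the route as committed, a client call might look like the following sketch (the wrapper function is hypothetical):

// Hypothetical client call for /api/generate.
async function generateImage(prompt: string, apiToken?: string): Promise<string[]> {
  const res = await fetch("/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt, apiToken }),
  });
  const data = await res.json() as { images?: string[]; text?: string; error?: string };
  if (!res.ok || !data.images) throw new Error(data.error ?? `Request failed (${res.status})`);
  return data.images; // base64 data URLs, e.g. "data:image/png;base64,..."
}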
app/api/improve-prompt/route.ts ADDED
@@ -0,0 +1,110 @@
+/**
+ * API ROUTE: /api/improve-prompt
+ *
+ * Uses Gemini 2.5 Flash to improve user prompts for better AI image generation.
+ * Takes a basic prompt and enhances it with more detailed, descriptive language
+ * that will produce better results from image generation models.
+ */
+
+import { NextRequest, NextResponse } from "next/server";
+import { GoogleGenAI } from "@google/genai";
+import { cookies } from "next/headers";
+
+export const runtime = "nodejs";
+export const maxDuration = 30;
+
+export async function POST(req: NextRequest) {
+  try {
+    const body = await req.json() as {
+      prompt: string;
+      type?: string; // 'background', 'edit', etc.
+    };
+
+    if (!body.prompt?.trim()) {
+      return NextResponse.json(
+        { error: "Prompt is required" },
+        { status: 400 }
+      );
+    }
+
+    // Check if user is logged in with HF Pro
+    let isHfProUser = false;
+    try {
+      const cookieStore = await cookies();
+      const hfToken = cookieStore.get('hf_token');
+      isHfProUser = !!hfToken?.value;
+    } catch (error) {
+      console.error('Error reading HF token from cookies:', error);
+    }
+
+    // Get API key
+    const apiKey = process.env.GOOGLE_API_KEY;
+    if (!apiKey || apiKey === 'your_actual_api_key_here') {
+      return NextResponse.json(
+        { error: `API key not configured. Please ${isHfProUser ? 'contact support' : 'login with HF Pro'}.` },
+        { status: 500 }
+      );
+    }
+
+    const ai = new GoogleGenAI({ apiKey });
+
+    // Create context-specific improvement prompts
+    const contextPrompts = {
+      background: `You are an expert at writing prompts for AI image generation. Take the following simple background description and transform it into a detailed, vivid prompt that will generate stunning, realistic backgrounds.
+
+Focus on:
+- Visual details (lighting, colors, textures, atmosphere)
+- Composition and depth
+- Realistic environmental elements
+- Photography/cinematic quality terms
+- Maintaining the character while enhancing the background
+
+Keep the character image and background realistic. Make the description rich and specific but not overly complex.
+
+Original prompt: "${body.prompt}"
+
+Write a short and concise improved background generation prompt and do not include anything unnecessary:`,
+
+      edit: `You are an expert at writing prompts for AI image editing. Take the following simple editing request and transform it into a clear, detailed prompt that will produce precise, high-quality image modifications.
+Original prompt: "${body.prompt}" Return a short and concise improved editing prompt and do not include anything unnecessary:`,
+
+      default: `You are an expert at writing prompts for AI image generation and editing. Take the following simple prompt and transform it into a detailed, effective prompt that will produce better results.
+
+Focus on:
+- Clear, specific instructions
+- Visual details and quality descriptors
+- Professional terminology
+- Realistic and natural-looking results
+
+Original prompt: "${body.prompt}"
+
+Write an improved prompt:`
+    };
+
+    const improvementPrompt = contextPrompts[body.type as keyof typeof contextPrompts] || contextPrompts.default;
+
+    const response = await ai.models.generateContent({
+      model: "gemini-2.5-flash",
+      contents: [{ role: "user", parts: [{ text: improvementPrompt }] }],
+    });
+
+    const improvedPrompt = response?.text?.trim();
+
+    if (!improvedPrompt) {
+      return NextResponse.json(
+        { error: "Failed to generate improved prompt" },
+        { status: 500 }
+      );
+    }
+
+    return NextResponse.json({ improvedPrompt });
+
+  } catch (error: any) {
+    console.error('[API] improve-prompt error:', error);
+
+    return NextResponse.json(
+      { error: "Failed to improve prompt. Please try again." },
+      { status: 500 }
+    );
+  }
+}
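A matching client-side call, sketched under the same assumptions (the wrapper is illustrative, not part of this commit):

// Hypothetical client call for /api/improve-prompt.
async function improvePrompt(prompt: string, type?: "background" | "edit"): Promise<string> {
  const res = await fetch("/api/improve-prompt", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt, type }), // unrecognized types fall back to the default template
  });
  const data = await res.json() as { improvedPrompt?: string; error?: string };
  if (!data.improvedPrompt) throw new Error(data.error ?? "No improved prompt returned");
  return data.improvedPrompt;
}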
app/api/merge/route.ts CHANGED
@@ -1,32 +1,66 @@
+/**
+ * API ROUTE: /api/merge (DEPRECATED - functionality moved to /api/process)
+ *
+ * Legacy endpoint for merging multiple character images into cohesive group photos.
+ * This functionality is now handled by the main /api/process endpoint with type="MERGE".
+ * Kept for backwards compatibility.
+ *
+ * Input: JSON with array of image URLs/data and optional custom prompt
+ * Output: JSON with merged group photo as base64 data URL
+ */
+
 import { NextRequest, NextResponse } from "next/server";
 import { GoogleGenAI } from "@google/genai";
 
+// Configure Next.js runtime for Node.js (required for Google AI SDK)
 export const runtime = "nodejs";
 
+/**
+ * Parse base64 data URL into MIME type and data components
+ * Handles data URLs in the format: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA...
+ *
+ * @param dataUrl Complete data URL string
+ * @returns Object with mimeType and data, or null if invalid format
+ */
 function parseDataUrl(dataUrl: string): { mimeType: string; data: string } | null {
-  // data:[<mediatype>][;base64],<data>
-  const match = dataUrl.match(/^data:(.*?);base64,(.*)$/);
-  if (!match) return null;
-  return { mimeType: match[1] || "image/png", data: match[2] };
+  const match = dataUrl.match(/^data:(.*?);base64,(.*)$/); // Extract MIME type and base64 data
+  if (!match) return null; // Invalid data URL format
+  return {
+    mimeType: match[1] || "image/png", // Use extracted MIME type or default to PNG
+    data: match[2] // Base64 encoded image data
+  };
 }
 
+/**
+ * Convert various image URL formats to inline data format required by Gemini AI
+ *
+ * Supports:
+ * - Data URLs (data:image/png;base64,...)
+ * - HTTP/HTTPS URLs (fetches and converts to base64)
+ *
+ * @param url Image URL in any supported format
+ * @returns Promise resolving to inline data object or null on failure
+ */
 async function toInlineData(url: string): Promise<{ mimeType: string; data: string } | null> {
   try {
+    // Handle data URLs directly
     if (url.startsWith('data:')) {
       return parseDataUrl(url);
     }
+
+    // Handle HTTP URLs by fetching and converting to base64
     if (url.startsWith('http')) {
-      // Fetch HTTP URL and convert to base64
-      const res = await fetch(url);
-      const buf = await res.arrayBuffer();
-      const base64 = Buffer.from(buf).toString('base64');
-      const mimeType = res.headers.get('content-type') || 'image/jpeg';
+      const res = await fetch(url); // Fetch image from URL
+      const buf = await res.arrayBuffer(); // Get binary data
+      const base64 = Buffer.from(buf).toString('base64'); // Convert to base64
+      const mimeType = res.headers.get('content-type') || 'image/jpeg'; // Get MIME type from headers
       return { mimeType, data: base64 };
     }
-    return null;
+
+    return null; // Unsupported URL format
   } catch (e) {
     console.error('Failed to process image URL:', url.substring(0, 100), e);
-    return null;
+    return null; // Return null on any processing error
   }
 }
 
@@ -94,12 +128,6 @@ The result should look like all subjects were photographed together in the same
     }
 
     // Debug: Log what we're receiving
-    console.log(`[MERGE API] Received ${imgs.length} images to merge`);
-    console.log(`[MERGE API] Image types:`, imgs.map(img => {
-      if (img.startsWith('data:')) return 'data URL';
-      if (img.startsWith('http')) return 'HTTP URL';
-      return 'unknown';
-    }));
 
     const parts: any[] = [{ text: prompt }];
     for (const url of imgs) {
@@ -111,8 +139,6 @@ The result should look like all subjects were photographed together in the same
       parts.push({ inlineData: { mimeType: parsed.mimeType, data: parsed.data } });
     }
 
-    console.log(`[MERGE API] Sending ${parts.length - 1} images to model (prompt + images)`);
-    console.log(`[MERGE API] Prompt preview:`, prompt.substring(0, 200));
 
     const response = await ai.models.generateContent({
       model: "gemini-2.5-flash-image-preview",
app/api/process/route.ts CHANGED
@@ -1,32 +1,75 @@
+/**
+ * API ROUTE: /api/process
+ *
+ * Main image processing endpoint for the Nano Banana Editor.
+ * Handles all image transformation operations using Google's Gemini AI model.
+ *
+ * Supported Operations:
+ * - MERGE: Combine multiple character images into a cohesive group photo
+ * - COMBINED: Apply multiple transformations in a single API call
+ * - Background changes (color, preset, custom, AI-generated)
+ * - Clothing modifications using reference images
+ * - Artistic style transfers (anime, cyberpunk, van gogh, etc.)
+ * - Text-based editing with natural language prompts
+ * - Camera effects and photographic settings
+ * - Age transformations
+ * - Face modifications (expressions, accessories, hair, etc.)
+ *
+ * Input: JSON with image data, operation type, and parameters
+ * Output: JSON with processed image(s) as base64 data URLs
+ */
+
 import { NextRequest, NextResponse } from "next/server";
 import { GoogleGenAI } from "@google/genai";
+import { cookies } from "next/headers";
 
+// Configure Next.js runtime for Node.js (required for Google AI SDK)
 export const runtime = "nodejs";
 
-// Increase the body size limit to 50MB for large images
-export const maxDuration = 60; // 60 seconds timeout
+// Set maximum execution time to 60 seconds for complex AI operations
+export const maxDuration = 60;
 
+/**
+ * Parse base64 data URL into components
+ *
+ * Extracts MIME type and base64 data from data URLs like:
+ * "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAA..."
+ *
+ * @param dataUrl The data URL string to parse
+ * @returns Object with mimeType and data, or null if invalid
+ */
 function parseDataUrl(dataUrl: string): { mimeType: string; data: string } | null {
-  const match = dataUrl.match(/^data:(.*?);base64,(.*)$/);
-  if (!match) return null;
-  return { mimeType: match[1] || "image/png", data: match[2] };
+  const match = dataUrl.match(/^data:(.*?);base64,(.*)$/); // Regex to capture MIME type and data
+  if (!match) return null; // Invalid format
+  return {
+    mimeType: match[1] || "image/png", // Default to PNG if no MIME type
+    data: match[2] // Base64 image data
+  };
 }
 
+/**
+ * Main POST handler for image processing requests
+ *
+ * Processes incoming image transformation requests through Google's Gemini AI.
+ * Handles both single-image operations and multi-image merging.
+ *
+ * @param req NextJS request object containing JSON body with image data and parameters
+ * @returns JSON response with processed image(s) or error message
+ */
 export async function POST(req: NextRequest) {
   try {
-    // Log request size for debugging
-    const contentLength = req.headers.get('content-length');
-    console.log(`[API] Request size: ${contentLength} bytes`);
+    // Log incoming request size for debugging and monitoring
 
+    // Parse and validate the JSON request body
     let body: any;
     try {
      body = await req.json() as {
-        type: string;
-        image?: string;
-        images?: string[];
-        prompt?: string;
-        params?: any;
-        apiToken?: string;
+        type: string; // Operation type: "MERGE", "COMBINED", etc.
+        image?: string; // Single image for processing (base64 data URL)
+        images?: string[]; // Multiple images for merge operations
+        prompt?: string; // Custom text prompt for AI
+        params?: any; // Node-specific parameters (background, clothes, etc.)
+        apiToken?: string; // User's Google AI API token
      };
    } catch (jsonError) {
      console.error('[API] Failed to parse JSON:', jsonError);
@@ -36,50 +79,90 @@ export async function POST(req: NextRequest) {
      );
    }
 
-    // Use user-provided API token or fall back to environment variable
+    // Check if user is logged in with HF Pro (for premium features)
+    let isHfProUser = false;
+    try {
+      const cookieStore = await cookies();
+      const hfToken = cookieStore.get('hf_token');
+      isHfProUser = !!hfToken?.value;
+    } catch (error) {
+      console.error('Error reading HF token from cookies:', error);
+    }
+
+    // Validate and retrieve Google API key from user input or environment
    const apiKey = body.apiToken || process.env.GOOGLE_API_KEY;
    if (!apiKey || apiKey === 'your_actual_api_key_here') {
      return NextResponse.json(
-        { error: "API key not provided. Please enter your Hugging Face API token in the top right corner or add GOOGLE_API_KEY to .env.local file." },
+        { error: `API key not provided. Please ${isHfProUser ? 'enter your Google Gemini API token in the top right' : 'login with HF Pro or enter your Google Gemini API token'}.` },
        { status: 500 }
      );
    }
 
+    // Initialize Google AI client with the validated API key
    const ai = new GoogleGenAI({ apiKey });
 
-    // Helpers
+    /**
+     * Universal image data converter
+     *
+     * Converts various image input formats to the inline data format required by Gemini AI.
+     * Handles multiple input types for maximum flexibility:
+     *
+     * @param url Image source: data URL, HTTP URL, or relative path
+     * @returns Promise resolving to {mimeType, data} object or null if conversion fails
+     */
    const toInlineDataFromAny = async (url: string): Promise<{ mimeType: string; data: string } | null> => {
-      if (!url) return null;
+      if (!url) return null; // Handle empty/null input
+
      try {
+        // Case 1: Data URL (data:image/png;base64,...)
        if (url.startsWith('data:')) {
-          return parseDataUrl(url);
+          return parseDataUrl(url); // Use existing parser for data URLs
        }
+
+        // Case 2: HTTP/HTTPS URL (external image)
        if (url.startsWith('http')) {
-          const res = await fetch(url);
-          const buf = await res.arrayBuffer();
-          const base64 = Buffer.from(buf).toString('base64');
-          const mimeType = res.headers.get('content-type') || 'image/jpeg';
+          const res = await fetch(url); // Fetch external image
+          const buf = await res.arrayBuffer(); // Get binary data
+          const base64 = Buffer.from(buf).toString('base64'); // Convert to base64
+          const mimeType = res.headers.get('content-type') || 'image/jpeg'; // Get MIME type from headers
          return { mimeType, data: base64 };
        }
+
+        // Case 3: Relative path (local image on server)
        if (url.startsWith('/')) {
-          const host = req.headers.get('host') ?? 'localhost:3000';
-          const proto = req.headers.get('x-forwarded-proto') ?? 'http';
-          const absolute = `${proto}://${host}${url}`;
-          const res = await fetch(absolute);
-          const buf = await res.arrayBuffer();
-          const base64 = Buffer.from(buf).toString('base64');
-          const mimeType = res.headers.get('content-type') || 'image/png';
+          const host = req.headers.get('host') ?? 'localhost:3000'; // Get current host
+          const proto = req.headers.get('x-forwarded-proto') ?? 'http'; // Determine protocol
+          const absolute = `${proto}://${host}${url}`; // Build absolute URL
+          const res = await fetch(absolute); // Fetch local image
+          const buf = await res.arrayBuffer(); // Get binary data
+          const base64 = Buffer.from(buf).toString('base64'); // Convert to base64
+          const mimeType = res.headers.get('content-type') || 'image/png'; // Get MIME type
          return { mimeType, data: base64 };
        }
-        return null;
+
+        return null; // Unsupported URL format
      } catch {
-        return null;
+        return null; // Handle any conversion errors gracefully
      }
    };
 
-    // Handle MERGE node type separately
+    /* ========================================
+       MERGE OPERATION - MULTI-IMAGE PROCESSING
+       ======================================== */
+
+    /**
+     * Handle MERGE node type separately from single-image operations
+     *
+     * MERGE operations combine multiple character images into a single cohesive group photo.
+     * This requires special handling because:
+     * - Multiple input images need to be processed simultaneously
+     * - AI must understand how to naturally blend subjects together
+     * - Lighting, perspective, and scale must be consistent across all subjects
+     */
    if (body.type === "MERGE") {
-      const imgs = body.images?.filter(Boolean) ?? [];
+      const imgs = body.images?.filter(Boolean) ?? []; // Remove any null/undefined images
+
+      // Validate minimum input requirement for merge operations
      if (imgs.length < 2) {
        return NextResponse.json(
          { error: "MERGE requires at least two images" },
@@ -87,8 +170,8 @@
        );
      }
 
-      // Build parts array for merge: first the text prompt, then image inlineData parts
-      let mergePrompt = body.prompt;
+      // Determine the AI prompt for merge operation
+      let mergePrompt = body.prompt; // Use custom prompt if provided
 
      if (!mergePrompt) {
        mergePrompt = `MERGE TASK: Create a natural, cohesive group photo combining ALL subjects from ${imgs.length} provided images.
@@ -125,7 +208,6 @@ The result should look like all subjects were photographed together in the same
      const mergeParts: any[] = [{ text: mergePrompt }];
      for (let i = 0; i < imgs.length; i++) {
        const url = imgs[i];
-        console.log(`[MERGE] Processing image ${i + 1}/${imgs.length}, type: ${typeof url}, length: ${url?.length || 0}`);
 
        try {
          const parsed = await toInlineDataFromAny(url);
@@ -134,13 +216,11 @@ The result should look like all subjects were photographed together in the same
            continue;
          }
          mergeParts.push({ inlineData: { mimeType: parsed.mimeType, data: parsed.data } });
-          console.log(`[MERGE] Successfully processed image ${i + 1}`);
        } catch (error) {
          console.error(`[MERGE] Error processing image ${i + 1}:`, error);
        }
      }
 
-      console.log(`[MERGE] Sending ${mergeParts.length - 1} images to model`);
 
      const response = await ai.models.generateContent({
        model: "gemini-2.5-flash-image-preview",
@@ -182,28 +262,172 @@ The result should look like all subjects were photographed together in the same
    const prompts: string[] = [];
    const params = body.params || {};
 
+    // Debug: Log all received parameters
+
    // We'll collect additional inline image parts (references)
    const referenceParts: { inlineData: { mimeType: string; data: string } }[] = [];
 
    // Background modifications
    if (params.backgroundType) {
      const bgType = params.backgroundType;
+
      if (bgType === "color") {
-        prompts.push(`Change the background to a solid ${params.backgroundColor || "white"} background.`);
+        prompts.push(`Change the background to a solid ${params.backgroundColor || "white"} background with smooth, even color coverage.`);
+
+      } else if (bgType === "gradient") {
+        const direction = params.gradientDirection || "to right";
+        const startColor = params.gradientStartColor || "#ff6b6b";
+        const endColor = params.gradientEndColor || "#4ecdc4";
+
+        if (direction === "radial") {
+          prompts.push(`Replace the background with a radial gradient that starts with ${startColor} in the center and transitions smoothly to ${endColor} at the edges, creating a circular gradient effect.`);
+        } else {
+          prompts.push(`Replace the background with a linear gradient flowing ${direction}, starting with ${startColor} and smoothly transitioning to ${endColor}.`);
+        }
+
      } else if (bgType === "image") {
        prompts.push(`Change the background to ${params.backgroundImage || "a beautiful beach scene"}.`);
+
+      } else if (bgType === "city") {
+        const sceneType = params.citySceneType || "busy_street";
+        const timeOfDay = params.cityTimeOfDay || "daytime";
+
+        let cityDescription = "";
+
+        switch (sceneType) {
+          case "busy_street":
+            cityDescription = "a realistic busy city street with people walking at various distances around the main character. Include pedestrians in business attire, casual clothing, carrying bags and phones - some walking close by (appearing similar size to main character), others further in the background (appearing smaller due to distance). Show urban storefronts, traffic lights, street signs, and parked cars with authentic city atmosphere and proper depth perception";
+            break;
+          case "tokyo_shibuya":
+            cityDescription = "the iconic Tokyo Shibuya Crossing with people walking at various distances around the main character. Include people close by (similar scale to main character) and others further away (smaller due to distance), Japanese signage, neon advertisements, the famous scramble crossing zebra stripes, people in typical Tokyo fashion, some wearing masks, carrying colorful umbrellas. Show the massive LED screens, buildings towering above, and create proper depth with people at different distances creating natural perspective";
+            break;
+          case "tokyo_subway":
+            cityDescription = "a realistic Tokyo subway environment with commuters at various distances from the main character. Include people nearby (similar scale) and others further down corridors (smaller due to perspective), authentic Japanese subway tile walls, directional signage in Japanese, the distinctive Tokyo Metro design aesthetic, and proper depth showing the underground transit system's scale and architecture";
+            break;
+          case "times_square":
+            cityDescription = "Times Square NYC with bright LED billboards, street performers, tourists, and New Yorkers walking closely around the main character. Include authentic yellow taxi cabs, hot dog vendors, people taking selfies, Broadway theater marquees, the famous red steps, TKTS booth, and the overwhelming sensory experience of NYC's most famous intersection";
+            break;
+          case "downtown_skyline":
+            cityDescription = "a downtown city skyline with tall buildings, glass towers, and urban architecture in the background while people in business attire walk nearby on the sidewalk";
+            break;
+          case "urban_crosswalk":
+            cityDescription = "an urban crosswalk intersection with pedestrians of diverse backgrounds crossing around the main character, traffic lights, crosswalk signals, city buses, and the natural flow of city foot traffic";
+            break;
+          case "shopping_district":
+            cityDescription = "a bustling shopping district with people carrying shopping bags walking near the main character, storefront window displays, outdoor cafes, street vendors, and the lively atmosphere of commercial city life";
+            break;
+          case "city_park":
+            cityDescription = "a city park with people jogging, walking dogs, and families enjoying activities around the main character, with urban skyscrapers visible in the background through the trees";
+            break;
+          case "rooftop_view":
+            cityDescription = "a rooftop terrace with people socializing around the main character, overlooking a sprawling city skyline with twinkling lights and urban architecture stretching to the horizon";
+            break;
+          case "blade_runner_street":
+            cityDescription = "a cinematic Blade Runner-inspired street scene with neon-soaked alleyways, people in futuristic clothing walking through steam and rain around the main character. Include holographic advertisements, flying vehicles in the distance, Asian-influenced signage, dark atmospheric lighting with cyan and magenta neon reflections on wet pavement, and the dystopian cyberpunk aesthetic of the iconic film";
+            break;
+          case "matrix_alley":
+            cityDescription = "a Matrix-inspired urban alley with people in dark clothing and sunglasses walking purposefully around the main character. Include the distinctive green-tinted lighting, concrete brutalist architecture, fire escapes, urban decay, shadowy doorways, and the cold, digital atmosphere of the Matrix films with realistic but slightly stylized cinematography";
+            break;
+          default:
+            cityDescription = "a dynamic city environment with people walking naturally around the main character in an authentic urban setting";
+        }
+
+        let timeDescription = "";
+        switch (timeOfDay) {
+          case "golden_hour":
+            timeDescription = " during golden hour with warm, glowing sunlight";
+            break;
+          case "daytime":
+            timeDescription = " during bright daytime with clear lighting";
+            break;
+          case "blue_hour":
+            timeDescription = " during blue hour with twilight atmosphere";
+            break;
+          case "night":
+            timeDescription = " at night with city lights, illuminated windows, and neon glow";
+            break;
+          case "dawn":
+            timeDescription = " at dawn with soft morning light";
+            break;
+          case "overcast":
+            timeDescription = " on an overcast day with diffused lighting";
+            break;
+          default:
+            timeDescription = "";
+        }
+
+        prompts.push(`Replace the background with ${cityDescription}${timeDescription}. CRITICAL SCALE REQUIREMENTS: Keep the main character at their EXACT original size and position - do NOT make them smaller or change their scale. The background people should be appropriately sized relative to their distance from the camera, with people closer to the camera appearing larger and people further away appearing smaller, but the main character must maintain their original proportions. Ensure the main character appears naturally integrated into the scene with proper lighting, shadows, and perspective that matches the environment.`);
+
+      } else if (bgType === "photostudio") {
+        const setup = params.studioSetup || "white_seamless";
+        const lighting = params.studioLighting || "key_fill";
+        const faceCamera = params.faceCamera || false;
+
+        let setupDescription = "";
+        switch (setup) {
+          case "white_seamless":
+            setupDescription = "a professional white seamless paper backdrop";
+            break;
+          case "black_seamless":
+            setupDescription = "a professional black seamless paper backdrop";
+            break;
+          case "grey_seamless":
+            setupDescription = "a professional grey seamless paper backdrop";
+            break;
+          case "colored_seamless":
+            const bgColor = params.studioBackgroundColor || "#ffffff";
+            setupDescription = `a professional seamless paper backdrop in ${bgColor}`;
+            break;
+          case "textured_backdrop":
+            setupDescription = "a professional textured photography backdrop";
+            break;
+          case "infinity_cove":
+            setupDescription = "a professional infinity cove studio setup with curved backdrop";
+            break;
+          default:
+            setupDescription = "a professional studio backdrop";
+        }
+
+        let lightingDescription = "";
+        switch (lighting) {
+          case "key_fill":
+            lightingDescription = "key and fill lighting for balanced illumination";
+            break;
+          case "three_point":
+            lightingDescription = "three-point lighting with key, fill, and rim lights";
+            break;
+          case "beauty_lighting":
+            lightingDescription = "beauty lighting setup with soft, flattering illumination";
+            break;
+          case "dramatic_lighting":
+            lightingDescription = "dramatic single-light setup with strong shadows";
+            break;
+          case "soft_lighting":
+            lightingDescription = "soft, diffused lighting for gentle illumination";
+            break;
+          case "hard_lighting":
+            lightingDescription = "hard, directional lighting for sharp shadows and contrast";
+            break;
+          default:
+            lightingDescription = "professional studio lighting";
+        }
+
+        const positioningInstruction = faceCamera ? " Position the person to face directly toward the camera with confident posture." : "";
+
+        prompts.push(`Crop the head and create a 2-inch ID photo. Place the person in a professional photo studio with ${setupDescription} and ${lightingDescription}. Create a clean, professional portrait setup with proper studio atmosphere.${positioningInstruction}`);
+
      } else if (bgType === "upload" && params.customBackgroundImage) {
        prompts.push(`Replace the background using the provided custom background reference image (attached below). Ensure perspective and lighting match.`);
        const bgRef = await toInlineDataFromAny(params.customBackgroundImage);
        if (bgRef) referenceParts.push({ inlineData: bgRef });
-      } else if (params.customPrompt) {
-        prompts.push(params.customPrompt);
+
+      } else if (bgType === "custom" && params.customPrompt) {
+        prompts.push(`${params.customPrompt}. CRITICAL SCALE REQUIREMENTS: Keep the main character at their EXACT original size and position - do NOT make them smaller or change their scale. Ensure the main character appears naturally integrated into the scene with proper lighting, shadows, and perspective that matches the environment.`);
      }
    }
 
    // Clothes modifications
    if (params.clothesImage) {
-      console.log(`[API] Processing clothes image, type: ${typeof params.clothesImage}, length: ${params.clothesImage?.length || 0}`);
 
      if (params.selectedPreset === "Sukajan") {
        prompts.push("Replace the person's clothing with a Japanese sukajan jacket (embroidered designs). Use the clothes reference image if provided.");
@@ -216,7 +440,6 @@ The result should look like all subjects were photographed together in the same
      try {
        const clothesRef = await toInlineDataFromAny(params.clothesImage);
        if (clothesRef) {
-          console.log(`[API] Successfully processed clothes image`);
          referenceParts.push({ inlineData: clothesRef });
        } else {
          console.error('[API] Failed to process clothes image - toInlineDataFromAny returned null');
@@ -230,25 +453,26 @@ The result should look like all subjects were photographed together in the same
    if (params.stylePreset) {
      const strength = params.styleStrength || 50;
      const styleMap: { [key: string]: string } = {
-        "90s-anime": "Convert the image to 90's anime art style with classic anime features: large expressive eyes, detailed hair, soft shading, nostalgic colors reminiscent of Studio Ghibli and classic anime productions",
-        "mha": "Transform the image into My Hero Academia anime style with modern crisp lines, vibrant colors, dynamic character design, and heroic aesthetics typical of the series",
-        "dbz": "Apply Dragon Ball Z anime style with sharp angular features, spiky hair, intense expressions, bold outlines, high contrast shading, and dramatic action-oriented aesthetics",
-        "ukiyo-e": "Render in traditional Japanese Ukiyo-e woodblock print style with flat colors, bold outlines, stylized waves and clouds, traditional Japanese artistic elements",
-        "cyberpunk": "Transform into cyberpunk aesthetic with neon colors (cyan, magenta, yellow), dark backgrounds, futuristic elements, holographic effects, tech-noir atmosphere",
-        "steampunk": "Apply steampunk style with Victorian-era brass and copper tones, mechanical gears, steam effects, vintage industrial aesthetic, sepia undertones",
-        "cubism": "Render in Cubist art style with geometric fragmentation, multiple perspectives shown simultaneously, abstract angular forms, Picasso-inspired decomposition",
-        "van-gogh": "Apply Post-Impressionist Van Gogh style with thick swirling brushstrokes, vibrant yellows and blues, expressive texture, starry night-like patterns",
-        "simpsons": "Convert to The Simpsons cartoon style with yellow skin tones, simple rounded features, bulging eyes, overbite, Matt Groening's distinctive character design",
-        "family-guy": "Transform into Family Guy animation style with rounded character design, simplified features, Seth MacFarlane's distinctive art style, bold outlines",
-        "arcane": "Apply Arcane (League of Legends) style with painterly brush-stroke textures, neon rim lighting, hand-painted feel, stylized realism, vibrant color grading",
-        "wildwest": "Render in Wild West style with dusty desert tones, sunset orange lighting, vintage film grain, cowboy aesthetic, sepia and brown color palette",
-        "stranger-things": "Apply Stranger Things 80s aesthetic with Kodak film push-process look, neon magenta backlight, grainy vignette, retro sci-fi horror atmosphere",
-        "breaking-bad": "Transform with Breaking Bad cinematography style featuring dusty New Mexico orange and teal color grading, 35mm film grain, desert atmosphere, dramatic lighting"
+        "90s-anime": "Transform into vibrant 90s anime art style",
+        "mha": "Convert into My Hero Academia anime style ",
+        "spiderverse": "Convert into Spiderverse anime style",
+        "dbz": "Convert into Dragon Ball Z anime style ",
+        "ukiyo-e": "Convert into traditional Japanese Ukiyo-e woodblock print style with flat color planes, bold black outlines, stylized natural elements, limited color palette of blues and earth tones, geometric patterns, and the distinctive floating world aesthetic of Edo period art",
+        "cubism": "Convert into Cubist art style with geometric fragmentation and angular decomposition, use various different colors",
+        "van-gogh": "Convert into Post-Impressionist Van Gogh style with thick impasto paint texture, bold swirling brushstrokes that create dynamic movement, vibrant yellows and deep blues with expressive non-naturalistic color choices, visible three-dimensional brush marks, and the characteristic flowing patterns and emotional intensity seen in masterworks like Starry Night",
+        "simpsons": "Convert into The Simpsons cartoon style ",
+        "family-guy": "Convert into Family Guy animation style",
+        "pixar": "Convert into Pixar animation style",
+        "manga": "Convert into Manga style",
+
+
      };
 
      const styleDescription = styleMap[params.stylePreset];
      if (styleDescription) {
        prompts.push(`${styleDescription}. Apply this style transformation at ${strength}% intensity while preserving the core subject matter.`);
+      } else {
+        console.error(`[API] Style not found in styleMap: ${params.stylePreset}`);
      }
    }
 
@@ -257,30 +481,133 @@ The result should look like all subjects were photographed together in the same
      prompts.push(params.editPrompt);
    }
 
-    // Camera settings
+    // Camera settings - Enhanced for Gemini 2.5 Flash Image
    if (params.focalLength || params.aperture || params.shutterSpeed || params.whiteBalance || params.angle ||
-        params.iso || params.filmStyle || params.lighting || params.bokeh || params.composition) {
-      const cameraSettings: string[] = [];
+        params.iso || params.filmStyle || params.lighting || params.bokeh || params.composition || params.motionBlur) {
+      // Build cinematic camera prompt for professional, movie-like results
+      let cameraPrompt = "CINEMATIC CAMERA TRANSFORMATION: Transform this image into a professional, cinematic photograph with movie-quality production values";
+
      if (params.focalLength) {
-        if (params.focalLength === "8mm fisheye") {
-          cameraSettings.push("Apply 8mm fisheye lens effect with 180-degree circular distortion");
+        if (params.focalLength === "8mm") {
+          cameraPrompt += " shot with an ultra-wide 8mm fisheye lens creating dramatic barrel distortion, immersive perspective, and cinematic edge curvature typical of action sequences";
+        } else if (params.focalLength === "14mm") {
+          cameraPrompt += " captured with a 14mm ultra-wide angle lens for sweeping cinematic vistas and dramatic environmental context";
+        } else if (params.focalLength === "24mm") {
+          cameraPrompt += " shot with a 24mm wide-angle cinema lens for establishing shots with expansive field of view and slight perspective enhancement";
+        } else if (params.focalLength === "35mm") {
+          cameraPrompt += " filmed with a 35mm lens providing natural cinematic perspective, the gold standard for narrative storytelling";
+        } else if (params.focalLength === "50mm") {
+          cameraPrompt += " captured with a 50mm cinema lens for authentic human vision perspective and natural depth rendering";
+        } else if (params.focalLength === "85mm") {
+          cameraPrompt += " shot with an 85mm portrait cinema lens for intimate character close-ups with beautiful subject isolation and compressed backgrounds";
+        } else if (params.focalLength === "100mm") {
+          cameraPrompt += " filmed with a 100mm telephoto lens for dramatic compression and cinematic subject isolation";
+        } else if (params.focalLength === "135mm") {
+          cameraPrompt += " captured with a 135mm telephoto cinema lens for extreme compression and dreamlike background separation";
        } else {
-          cameraSettings.push(`Focal Length: ${params.focalLength}`);
+          cameraPrompt += ` shot with professional ${params.focalLength} cinema glass`;
        }
      }
-      if (params.aperture) cameraSettings.push(`Aperture: ${params.aperture}`);
-      if (params.shutterSpeed) cameraSettings.push(`Shutter Speed: ${params.shutterSpeed}`);
-      if (params.whiteBalance) cameraSettings.push(`White Balance: ${params.whiteBalance}`);
-      if (params.angle) cameraSettings.push(`Camera Angle: ${params.angle}`);
-      if (params.iso) cameraSettings.push(`${params.iso}`);
-      if (params.filmStyle) cameraSettings.push(`Film style: ${params.filmStyle}`);
-      if (params.lighting) cameraSettings.push(`Lighting: ${params.lighting}`);
-      if (params.bokeh) cameraSettings.push(`Bokeh effect: ${params.bokeh}`);
-      if (params.composition) cameraSettings.push(`Composition: ${params.composition}`);
 
-      if (cameraSettings.length > 0) {
-        prompts.push(`Apply professional photography settings: ${cameraSettings.join(", ")}`);
+      if (params.aperture) {
+        if (params.aperture === "f/1.2") {
+          cameraPrompt += `, shot wide open at f/1.2 for extreme shallow depth of field, ethereal bokeh, and cinematic subject isolation with dreamy background blur`;
+        } else if (params.aperture === "f/1.4") {
+          cameraPrompt += `, captured at f/1.4 for beautiful shallow depth of field, creating that signature cinematic look with smooth background separation`;
+        } else if (params.aperture === "f/2.8") {
+          cameraPrompt += `, shot at f/2.8 for controlled depth of field, maintaining subject sharpness while creating pleasing background blur`;
+        } else if (params.aperture === "f/4") {
+          cameraPrompt += `, filmed at f/4 for balanced depth of field, keeping key subjects sharp while maintaining some background separation`;
+        } else if (params.aperture === "f/5.6") {
+          cameraPrompt += `, captured at f/5.6 for extended depth of field while maintaining cinematic quality and professional sharpness`;
+        } else if (params.aperture === "f/8" || params.aperture === "f/11") {
+          cameraPrompt += `, shot at ${params.aperture} for deep focus cinematography with tack-sharp details throughout the entire frame`;
+        } else {
+          cameraPrompt += `, professionally exposed at ${params.aperture}`;
+        }
      }
+
+      if (params.iso) {
+        if (params.iso === "ISO 100") {
+          cameraPrompt += ", shot at ISO 100 for pristine image quality, zero noise, and maximum dynamic range typical of high-end cinema cameras";
+        } else if (params.iso === "ISO 200") {
+          cameraPrompt += ", captured at ISO 200 for clean shadows and optimal color reproduction with professional cinema camera characteristics";
+        } else if (params.iso === "ISO 400") {
+          cameraPrompt += ", filmed at ISO 400 for balanced exposure with minimal noise, the sweet spot for most cinematic scenarios";
+        } else if (params.iso === "ISO 800") {
+          cameraPrompt += ", shot at ISO 800 creating subtle film grain texture that adds cinematic character and organic feel";
+        } else if (params.iso === "ISO 1600") {
+          cameraPrompt += ", captured at ISO 1600 with controlled grain for dramatic low-light cinematography and moody atmosphere";
+        } else if (params.iso === "ISO 3200") {
+          cameraPrompt += ", filmed at ISO 3200 with artistic grain structure for gritty, realistic cinema aesthetics";
+        } else {
+          cameraPrompt += `, shot at ${params.iso} with appropriate noise characteristics`;
+        }
+      }
+
+      if (params.lighting) {
+        if (params.lighting === "Golden Hour") {
+          cameraPrompt += ", cinematically lit during golden hour with warm, directional sunlight creating magical rim lighting, long shadows, and that coveted cinematic glow";
+        } else if (params.lighting === "Blue Hour") {
+          cameraPrompt += ", captured during blue hour with soft, even twilight illumination and cool color temperature for moody cinematic atmosphere";
+        } else if (params.lighting === "Studio") {
+          cameraPrompt += ", professionally lit with multi-point studio lighting setup featuring key light, fill light, and rim light for commercial cinema quality";
+        } else if (params.lighting === "Natural") {
+          cameraPrompt += ", naturally lit with soft, diffused daylight providing even illumination and organic shadow patterns";
+        } else if (params.lighting === "Dramatic") {
+          cameraPrompt += ", dramatically lit with high-contrast lighting creating strong shadows and highlights for cinematic tension";
+        } else {
+          cameraPrompt += `, professionally lit with ${params.lighting} lighting setup`;
+        }
+      }
+
+      if (params.bokeh) {
+        if (params.bokeh === "Smooth Bokeh") {
+          cameraPrompt += ", featuring silky smooth bokeh with perfectly circular out-of-focus highlights and creamy background transitions";
+        } else if (params.bokeh === "Swirly Bokeh") {
+          cameraPrompt += ", featuring artistic swirly bokeh with spiral-like background blur patterns for unique visual character";
+        } else if (params.bokeh === "Hexagonal Bokeh") {
+          cameraPrompt += ", featuring hexagonal bokeh with geometric six-sided highlight shapes typical of cinema lenses";
+        } else {
+          cameraPrompt += `, featuring ${params.bokeh} quality bokeh rendering in out-of-focus areas`;
+        }
+      }
+
+      if (params.motionBlur) {
+        if (params.motionBlur === "Light Motion Blur") {
+          cameraPrompt += ", with subtle motion blur suggesting gentle movement and adding cinematic flow to the image";
+        } else if (params.motionBlur === "Medium Motion Blur") {
+          cameraPrompt += ", with moderate motion blur creating dynamic energy and sense of movement typical of action cinematography";
+        } else if (params.motionBlur === "Heavy Motion Blur") {
+          cameraPrompt += ", with pronounced motion blur creating dramatic movement streaks and high-energy cinematic action";
    }
  }
 
    // Age transformation
@@ -288,6 +615,16 @@ The result should look like all subjects were photographed together in the same
      prompts.push(`Transform the person to look exactly ${params.targetAge} years old with age-appropriate features.`);
    }
 
    // Face modifications
    if (params.faceOptions) {
      const face = params.faceOptions;
@@ -298,6 +635,7 @@ The result should look like all subjects were photographed together in the same
      if (face.changeHairstyle) modifications.push(`change hairstyle to ${face.changeHairstyle}`);
      if (face.facialExpression) modifications.push(`change facial expression to ${face.facialExpression}`);
      if (face.beardStyle) modifications.push(`add/change beard to ${face.beardStyle}`);
 
      if (modifications.length > 0) {
        prompts.push(`Face modifications: ${modifications.join(", ")}`);
@@ -306,7 +644,7 @@ The result should look like all subjects were photographed together in the same
 
    // Combine all prompts
    let prompt = prompts.length > 0
-      ? prompts.join("\n\n") + "\n\nApply all these modifications while maintaining the person's identity and keeping unspecified aspects unchanged."
      : "Process this image with high quality output.";
 
    // Add the custom prompt if provided
@@ -314,6 +652,8 @@ The result should look like all subjects were photographed together in the same
      prompt = body.prompt + "\n\n" + prompt;
    }
 
    // Generate with Gemini
    const parts = [
      { text: prompt },
@@ -323,23 +663,71 @@ The result should look like all subjects were photographed together in the same
      ...referenceParts,
    ];
 
-    const response = await ai.models.generateContent({
-      model: "gemini-2.5-flash-image-preview",
-      contents: parts,
-    });
 
    const outParts = (response as any)?.candidates?.[0]?.content?.parts ?? [];
    const images: string[] = [];
 
-    for (const p of outParts) {
      if (p?.inlineData?.data) {
        images.push(`data:image/png;base64,${p.inlineData.data}`);
      }
    }
 
    if (!images.length) {
      return NextResponse.json(
-        { error: "No image generated. Try adjusting your parameters." },
        { status: 500 }
      );
    }
@@ -348,15 +736,36 @@ The result should look like all subjects were photographed together in the same
  } catch (err: any) {
    console.error("/api/process error:", err);
    console.error("Error stack:", err?.stack);
 
    // Provide more specific error messages
-    if (err?.message?.includes('payload size')) {
      return NextResponse.json(
        { error: "Image data too large. Please use smaller images or reduce image quality." },
        { status: 413 }
      );
    }
 
    if (err?.message?.includes('JSON')) {
      return NextResponse.json(
        { error: "Invalid data format. Please ensure images are properly encoded." },
583
+ } else if (params.motionBlur === "Radial Blur") {
584
+ cameraPrompt += ", with radial motion blur emanating from the center, creating explosive zoom-like movement and dramatic focus pull";
585
+ } else if (params.motionBlur === "Zoom Blur") {
586
+ cameraPrompt += ", with zoom blur effect creating dramatic speed lines and kinetic energy radiating outward from the subject";
587
+ } else {
588
+ cameraPrompt += `, with ${params.motionBlur} motion effect`;
589
+ }
590
+ }
591
+
592
+ if (params.angle) {
593
+ if (params.angle === "Low Angle") {
594
+ cameraPrompt += ", shot from a low-angle perspective looking upward for dramatic impact";
595
+ } else if (params.angle === "Bird's Eye") {
596
+ cameraPrompt += ", captured from a bird's eye view directly overhead";
597
+ } else {
598
+ cameraPrompt += `, ${params.angle} camera angle`;
599
+ }
600
+ }
601
+
602
+ if (params.filmStyle && params.filmStyle !== "RAW") {
603
+ cameraPrompt += `, processed with ${params.filmStyle} film aesthetic and color grading`;
604
+ } else if (params.filmStyle === "RAW") {
605
+ cameraPrompt += ", with natural RAW processing maintaining realistic colors and contrast";
606
+ }
607
+
608
+ cameraPrompt += ". Maintain photorealistic quality with authentic camera characteristics, natural lighting, and professional composition.";
609
+
610
+ prompts.push(cameraPrompt);
611
  }
612
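+ // Illustrative example (not part of the source): with focalLength "85mm",
+ // aperture "f/1.4", and lighting "Golden Hour", the assembled cameraPrompt reads
+ // "CINEMATIC CAMERA TRANSFORMATION: ... shot with an 85mm portrait cinema lens
+ // ..., captured at f/1.4 for beautiful shallow depth of field ..., cinematically
+ // lit during golden hour ... Maintain photorealistic quality ...".
+ // Note that shutterSpeed, whiteBalance, and composition pass the guard above
+ // but currently have no branch that appends text for them.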
 
613
  // Age transformation
614
  if (params.targetAge) {
615
  prompts.push(`Transform the person to look exactly ${params.targetAge} years old with age-appropriate features.`);
616
  }
617
 
618
+ // Lighting effects
619
+ if (params.lightingPrompt && params.selectedLighting) {
620
+ prompts.push(`IMPORTANT: Completely transform the lighting on this person to match this exact description: ${params.lightingPrompt}. The lighting change should be dramatic and clearly visible. Keep their face, clothes, pose, and background exactly the same, but make the lighting transformation very obvious.`);
621
+ }
622
+
623
+ // Pose modifications
624
+ if (params.posePrompt && params.selectedPose) {
625
+ prompts.push(`IMPORTANT: Completely change the person's body pose to match this exact description: ${params.posePrompt}. The pose change should be dramatic and clearly visible. Keep their face, clothes, and background exactly the same, but make the pose transformation very obvious.`);
626
+ }
627
+
628
  // Face modifications
629
  if (params.faceOptions) {
630
  const face = params.faceOptions;
 
635
  if (face.changeHairstyle) modifications.push(`change hairstyle to ${face.changeHairstyle}`);
636
  if (face.facialExpression) modifications.push(`change facial expression to ${face.facialExpression}`);
637
  if (face.beardStyle) modifications.push(`add/change beard to ${face.beardStyle}`);
638
+ if (face.selectedMakeup) modifications.push(`add face makeup with red colors on the cheeks and some yellow and blue colors around the eye area`);
639
 
640
  if (modifications.length > 0) {
641
  prompts.push(`Face modifications: ${modifications.join(", ")}`);
 
644
 
645
  // Combine all prompts
646
  let prompt = prompts.length > 0
647
+ ? prompts.join("\n\n") + "\nApply all these modifications while maintaining the person's identity and keeping unspecified aspects unchanged."
648
  : "Process this image with high quality output.";
649
 
650
  // Add the custom prompt if provided
651
  if (body.prompt) {
652
  prompt = body.prompt + "\n\n" + prompt;
653
  }
654
 
655
+ // The combined prompt and image parts are now fully assembled for the Gemini call below
656
+
657
  // Generate with Gemini
658
  const parts = [
659
  { text: prompt },
 
663
  ...referenceParts,
664
  ];
665
 
666
+
667
+ let response;
668
+ try {
669
+ response = await ai.models.generateContent({
670
+ model: "gemini-2.5-flash-image-preview",
671
+ contents: parts,
672
+ });
673
+ } catch (geminiError: any) {
674
+ console.error('[API] Gemini API error:', geminiError);
675
+ console.error('[API] Gemini error details:', {
676
+ message: geminiError.message,
677
+ status: geminiError.status,
678
+ code: geminiError.code
679
+ });
680
+
681
+ if (geminiError.message?.includes('safety')) {
682
+ return NextResponse.json(
683
+ { error: "Content was blocked by safety filters. Try using different images or prompts." },
684
+ { status: 400 }
685
+ );
686
+ }
687
+
688
+ if (geminiError.message?.includes('quota') || geminiError.message?.includes('limit')) {
689
+ return NextResponse.json(
690
+ { error: "API quota exceeded. Please check your Gemini API usage limits." },
691
+ { status: 429 }
692
+ );
693
+ }
694
+
695
+ return NextResponse.json(
696
+ { error: `Gemini API error: ${geminiError.message || 'Unknown error'}` },
697
+ { status: 500 }
698
+ );
699
+ }
700
 
701
+
702
  const outParts = (response as any)?.candidates?.[0]?.content?.parts ?? [];
703
  const images: string[] = [];
704
+ const texts: string[] = [];
705
+
706
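+ // Response shape (sketch of the Gemini response consumed below): each entry in
+ // candidates[0].content.parts is either { text: string } or
+ // { inlineData: { mimeType: string, data: string /* base64-encoded image */ } }.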
 
707
+ for (let i = 0; i < outParts.length; i++) {
708
+ const p = outParts[i];
709
+
710
  if (p?.inlineData?.data) {
711
  images.push(`data:image/png;base64,${p.inlineData.data}`);
712
  }
713
+
714
+ if (p?.text) {
715
+ texts.push(p.text);
716
+ }
717
  }
718
 
719
  if (!images.length) {
720
+ console.error('[API] No images generated by Gemini. Text responses:', texts);
721
  return NextResponse.json(
722
+ {
723
+ error: "No image generated. Try adjusting your parameters.",
724
+ textResponse: texts.join('\n'),
725
+ debugInfo: {
726
+ partsCount: outParts.length,
727
+ candidatesCount: (response as any)?.candidates?.length || 0,
728
+ hasResponse: !!response
729
+ }
730
+ },
731
  { status: 500 }
732
  );
733
  }
 
736
  } catch (err: any) {
737
  console.error("/api/process error:", err);
738
  console.error("Error stack:", err?.stack);
739
+ console.error("Error details:", {
740
+ name: err?.name,
741
+ message: err?.message,
742
+ code: err?.code,
743
+ status: err?.status,
744
+ details: err?.details
745
+ });
746
 
747
  // Provide more specific error messages
748
+ if (err?.message?.includes('payload size') || err?.code === 413) {
749
  return NextResponse.json(
750
  { error: "Image data too large. Please use smaller images or reduce image quality." },
751
  { status: 413 }
752
  );
753
  }
754
 
755
+ if (err?.message?.includes('API key') || err?.message?.includes('authentication')) {
756
+ return NextResponse.json(
757
+ { error: "Invalid API key. Please check your Google Gemini API token." },
758
+ { status: 401 }
759
+ );
760
+ }
761
+
762
+ if (err?.message?.includes('quota') || err?.message?.includes('limit')) {
763
+ return NextResponse.json(
764
+ { error: "API quota exceeded. Please check your Google Gemini API usage limits." },
765
+ { status: 429 }
766
+ );
767
+ }
768
+
769
  if (err?.message?.includes('JSON')) {
770
  return NextResponse.json(
771
  { error: "Invalid data format. Please ensure images are properly encoded." },
app/editor.css CHANGED
@@ -1,64 +1,81 @@
1
  /* Node editor custom styles and animations */
2
 
3
- /* Animated connection lines */
4
  @keyframes flow {
5
- 0% {
6
- stroke-dashoffset: 0;
7
  }
8
- 100% {
9
- stroke-dashoffset: -20;
10
  }
11
  }
12
 
 
13
  .connection-animated {
14
- animation: flow 1s linear infinite;
15
- stroke-dasharray: 5, 5;
16
  }
17
 
18
- /* Processing pulse effect */
19
- @keyframes processingPulse {
20
- 0%, 100% {
21
- opacity: 1;
22
  }
23
- 50% {
24
- opacity: 0.6;
25
  }
26
  }
27
 
 
28
  .connection-processing {
29
- animation: processingPulse 1.5s ease-in-out infinite;
30
- stroke: #22c55e;
31
- stroke-width: 3;
32
- filter: drop-shadow(0 0 3px rgba(34, 197, 94, 0.5));
33
  }
34
 
35
- /* Flow particles effect */
36
  @keyframes flowParticle {
37
  0% {
38
- offset-distance: 0%;
39
- opacity: 0;
40
  }
41
  10% {
42
- opacity: 1;
43
  }
44
  90% {
45
- opacity: 1;
46
  }
47
  100% {
48
- offset-distance: 100%;
49
- opacity: 0;
50
  }
51
  }
52
 
 
53
  .flow-particle {
54
- animation: flowParticle 2s linear infinite;
55
  }
56
 
57
- /* Node processing state */
58
  .nb-node.processing {
59
- animation: processingPulse 1.5s ease-in-out infinite;
60
  }
61
 
 
62
  .nb-node.processing .nb-header {
 
63
  background: linear-gradient(90deg, rgba(34, 197, 94, 0.2), rgba(34, 197, 94, 0.1));
64
  }
 
1
  /* Node editor custom styles and animations */
2
 
3
+ /* ========================================
4
+ CONNECTION LINE ANIMATIONS
5
+ ======================================== */
6
+
7
+ /* Animation for regular connection lines when dragging - now truly continuous */
8
  @keyframes flow {
9
+ from {
10
+ stroke-dashoffset: 10; /* Start offset equal to one complete dash cycle */
11
  }
12
+ to {
13
+ stroke-dashoffset: 0; /* Move to next cycle, creating seamless continuous flow */
14
  }
15
  }
16
 
17
+ /* Applied to connection lines when user is actively dragging to connect nodes */
18
  .connection-animated {
19
+ animation: flow 0.8s linear infinite; /* Smooth continuous animation, slightly faster */
20
+ stroke-dasharray: 5, 5; /* Create dashed line: 5px dash, 5px gap pattern */
21
  }
22
 
23
+ /* Animation for processing connections - shows data flowing through active connections - now truly continuous */
24
+ @keyframes processingFlow {
25
+ from {
26
+ stroke-dashoffset: 12; /* Start offset equal to one complete dash cycle (8+4=12) */
27
  }
28
+ to {
29
+ stroke-dashoffset: 0; /* Move to next cycle, creating seamless continuous flow */
30
  }
31
  }
32
 
33
+ /* Applied to connection lines when nodes are actively processing data */
34
  .connection-processing {
35
+ animation: processingFlow 1.0s linear infinite; /* Smooth continuous animation, optimized timing */
36
+ stroke: #22c55e; /* Green color to indicate active processing */
37
+ stroke-width: 3; /* Thicker line to make it more prominent */
38
+ stroke-dasharray: 8, 4; /* Longer dashes (8px) with smaller gaps (4px) for better visibility */
39
  }
40
 
41
+ /* ========================================
42
+ PARTICLE FLOW EFFECT (EXPERIMENTAL)
43
+ ======================================== */
44
+
45
+ /* Animation for particles flowing along paths - uses CSS motion path */
46
  @keyframes flowParticle {
47
  0% {
48
+ offset-distance: 0%; /* Start at beginning of path */
49
+ opacity: 0; /* Fade in from transparent */
50
  }
51
  10% {
52
+ opacity: 1; /* Fully visible after 10% of animation */
53
  }
54
  90% {
55
+ opacity: 1; /* Stay visible until 90% of animation */
56
  }
57
  100% {
58
+ offset-distance: 100%; /* End at end of path */
59
+ opacity: 0; /* Fade out to transparent */
60
  }
61
  }
62
 
63
+ /* Class for individual particles flowing along connection paths */
64
  .flow-particle {
65
+ animation: flowParticle 2s linear infinite; /* 2 second cycle for particle to travel full path */
66
  }
67
 
68
+ /* ========================================
69
+ NODE PROCESSING STATES
70
+ ======================================== */
71
+
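+ /* processingPulse is still referenced by .nb-node.processing below, but its
+    keyframes were dropped in the rename to processingFlow. A minimal sketch,
+    restoring the removed rule so the node pulse keeps working: */
+ @keyframes processingPulse {
+   0%, 100% {
+     opacity: 1; /* Fully visible at the ends of the cycle */
+   }
+   50% {
+     opacity: 0.6; /* Dimmed at the midpoint for a gentle pulse */
+   }
+ }
+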
72
+ /* Animation for nodes themselves when they're processing */
73
  .nb-node.processing {
74
+ animation: processingPulse 1.5s ease-in-out infinite; /* Gentle pulsing effect */
75
  }
76
 
77
+ /* Special styling for processing node headers */
78
  .nb-node.processing .nb-header {
79
+ /* Subtle green gradient background to indicate processing state */
80
  background: linear-gradient(90deg, rgba(34, 197, 94, 0.2), rgba(34, 197, 94, 0.1));
81
  }
app/layout.tsx CHANGED
@@ -1,26 +1,63 @@
1
  import type { Metadata } from "next";
2
- import { Geist, Geist_Mono } from "next/font/google";
3
- import "./globals.css";
4
 
5
  const geistSans = Geist({
6
- variable: "--font-geist-sans",
7
- subsets: ["latin"],
8
  });
9
 
10
  const geistMono = Geist_Mono({
11
- variable: "--font-geist-mono",
12
- subsets: ["latin"],
13
  });
14
 
15
  export const metadata: Metadata = {
16
- title: "Nano Banana Editor",
17
- description: "Node-based photo editor for characters",
18
  };
19
 
20
  export default function RootLayout({
21
  children,
22
  }: Readonly<{
23
- children: React.ReactNode;
24
  }>) {
25
  return (
26
  <html lang="en">
 
1
+ /**
2
+ * ROOT LAYOUT COMPONENT
3
+ *
4
+ * Next.js 13+ app directory root layout that wraps all pages in the application.
5
+ * Defines the basic HTML structure, fonts, and global styling for the entire app.
6
+ *
7
+ * Key Features:
8
+ * - Google Fonts integration (Geist Sans and Geist Mono)
9
+ * - CSS custom properties for font family variables
10
+ * - Global CSS imports (Tailwind CSS and custom styles)
11
+ * - SEO metadata configuration
12
+ * - Consistent theming with CSS variables for background and text colors
13
+ */
14
+
15
  import type { Metadata } from "next";
16
+ import { Geist, Geist_Mono } from "next/font/google"; // Modern Google Fonts
17
+ import "./globals.css"; // Tailwind CSS and global styles
18
 
19
+ /**
20
+ * Configure Geist Sans font
21
+ * Modern, clean sans-serif font optimized for UI text
22
+ * Creates CSS variable --font-geist-sans for use in Tailwind classes
23
+ */
24
  const geistSans = Geist({
25
+ variable: "--font-geist-sans", // CSS custom property name
26
+ subsets: ["latin"], // Character subset to load (reduces bundle size)
27
  });
28
 
29
+ /**
30
+ * Configure Geist Mono font
31
+ * Monospace font for code, technical text, and fixed-width content
32
+ * Creates CSS variable --font-geist-mono for use in Tailwind classes
33
+ */
34
  const geistMono = Geist_Mono({
35
+ variable: "--font-geist-mono", // CSS custom property name
36
+ subsets: ["latin"], // Character subset to load
37
  });
38
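+ // Illustrative usage (assumption, matching the standard Next.js scaffold): the
+ // generated variables are attached in the markup below so CSS can resolve them:
+ // <body className={`${geistSans.variable} ${geistMono.variable} antialiased`}>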
 
39
+ /**
40
+ * SEO metadata configuration for the application
41
+ * Defines title, description, and other meta tags for search engines and social media
42
+ */
43
  export const metadata: Metadata = {
44
+ title: "Nano Banana Editor", // Browser tab title and SEO title
45
+ description: "Node-based photo editor for characters", // Meta description for search results
46
  };
47
 
48
+ /**
49
+ * Root Layout Component
50
+ *
51
+ * Wraps all pages with consistent HTML structure and styling.
52
+ * All pages in the app will be rendered inside the {children} placeholder.
53
+ *
54
+ * @param children React components representing the current page content
55
+ * @returns Complete HTML document structure with fonts and styling applied
56
+ */
57
  export default function RootLayout({
58
  children,
59
  }: Readonly<{
60
+ children: React.ReactNode; // Type-safe children prop
61
  }>) {
62
  return (
63
  <html lang="en">
app/nodes.tsx CHANGED
The diff for this file is too large to render. See raw diff
 
app/page.tsx CHANGED
@@ -1,32 +1,76 @@
 
 
1
  "use client";
2
 
 
3
  import React, { useEffect, useMemo, useRef, useState } from "react";
 
4
  import "./editor.css";
 
5
  import {
6
- BackgroundNodeView,
7
- ClothesNodeView,
8
- StyleNodeView,
9
- EditNodeView,
10
- CameraNodeView,
11
- AgeNodeView,
12
- FaceNodeView
 
 
13
  } from "./nodes";
 
14
  import { Button } from "../components/ui/button";
15
  import { Input } from "../components/ui/input";
16
-
 
 
17
  function cx(...args: Array<string | false | null | undefined>) {
18
  return args.filter(Boolean).join(" ");
19
  }
20
 
21
- // Simple ID helper
22
  const uid = () => Math.random().toString(36).slice(2, 9);
23
 
24
- // Generate merge prompt based on number of inputs
 
25
  function generateMergePrompt(characterData: { image: string; label: string }[]): string {
26
  const count = characterData.length;
27
 
 
28
  const labels = characterData.map((d, i) => `Image ${i + 1} (${d.label})`).join(", ");
29
 
 
30
  return `MERGE TASK: Create a natural, cohesive group photo combining ALL subjects from ${count} provided images.
31
 
32
  Images provided:
@@ -57,148 +101,330 @@ CRITICAL REQUIREMENTS:
57
  The result should look like all subjects were photographed together in the same place at the same time, NOT like separate images placed side by side.`;
58
  }
59
 
60
- // Types
61
- type NodeType = "CHARACTER" | "MERGE" | "BACKGROUND" | "CLOTHES" | "STYLE" | "EDIT" | "CAMERA" | "AGE" | "FACE" | "BLEND";
 
 
62
 
 
63
  type NodeBase = {
64
- id: string;
65
- type: NodeType;
66
- x: number; // world coords
67
- y: number; // world coords
68
  };
69
 
 
70
  type CharacterNode = NodeBase & {
71
  type: "CHARACTER";
72
- image: string; // data URL or http URL
73
- label?: string;
74
  };
75
 
 
 
 
 
 
76
  type MergeNode = NodeBase & {
77
  type: "MERGE";
78
- inputs: string[]; // node ids
79
- output?: string | null; // data URL from merge
80
- isRunning?: boolean;
81
- error?: string | null;
82
  };
83
 
 
 
 
 
84
  type BackgroundNode = NodeBase & {
85
  type: "BACKGROUND";
86
- input?: string; // node id
87
- output?: string;
88
- backgroundType: "color" | "image" | "upload" | "custom";
89
- backgroundColor?: string;
90
- backgroundImage?: string;
91
- customBackgroundImage?: string;
92
- customPrompt?: string;
93
- isRunning?: boolean;
94
- error?: string | null;
 
 
 
 
95
  };
96
 
 
 
 
 
97
  type ClothesNode = NodeBase & {
98
  type: "CLOTHES";
99
- input?: string;
100
- output?: string;
101
- clothesImage?: string;
102
- selectedPreset?: string;
103
- clothesPrompt?: string;
104
- isRunning?: boolean;
105
- error?: string | null;
106
  };
107
 
 
 
 
 
108
  type StyleNode = NodeBase & {
109
  type: "STYLE";
110
- input?: string;
111
- output?: string;
112
- stylePreset?: string;
113
- styleStrength?: number;
114
- isRunning?: boolean;
115
- error?: string | null;
116
  };
117
 
 
 
 
 
118
  type EditNode = NodeBase & {
119
  type: "EDIT";
120
- input?: string;
121
- output?: string;
122
- editPrompt?: string;
123
- isRunning?: boolean;
124
- error?: string | null;
125
  };
126
 
 
 
 
 
127
  type CameraNode = NodeBase & {
128
  type: "CAMERA";
129
- input?: string;
130
- output?: string;
131
- focalLength?: string;
132
- aperture?: string;
133
- shutterSpeed?: string;
134
- whiteBalance?: string;
135
- angle?: string;
136
- iso?: string;
137
- filmStyle?: string;
138
- lighting?: string;
139
- bokeh?: string;
140
- composition?: string;
141
- aspectRatio?: string;
142
- isRunning?: boolean;
143
- error?: string | null;
 
144
  };
145
 
 
 
 
 
146
  type AgeNode = NodeBase & {
147
  type: "AGE";
148
- input?: string;
149
- output?: string;
150
- targetAge?: number;
151
- isRunning?: boolean;
152
- error?: string | null;
153
  };
154
 
 
 
 
 
155
  type FaceNode = NodeBase & {
156
  type: "FACE";
157
- input?: string;
158
- output?: string;
159
- faceOptions?: {
160
- removePimples?: boolean;
161
- addSunglasses?: boolean;
162
- addHat?: boolean;
163
- changeHairstyle?: string;
164
- facialExpression?: string;
165
- beardStyle?: string;
 
 
166
  };
167
- isRunning?: boolean;
168
- error?: string | null;
169
  };
170
 
 
 
 
 
171
  type BlendNode = NodeBase & {
172
  type: "BLEND";
173
- input?: string;
174
- output?: string;
175
- blendStrength?: number;
176
- isRunning?: boolean;
177
- error?: string | null;
178
  };
179
 
180
- type AnyNode = CharacterNode | MergeNode | BackgroundNode | ClothesNode | StyleNode | EditNode | CameraNode | AgeNode | FaceNode | BlendNode;
 
 
 
 
 
 
 
 
 
 
 
 
 
181
 
182
- // Default placeholder portrait
183
- const DEFAULT_PERSON =
184
- "https://images.unsplash.com/photo-1527980965255-d3b416303d12?q=80&w=640&auto=format&fit=crop";
185
 
 
 
186
  function toDataUrls(files: FileList | File[]): Promise<string[]> {
187
- const arr = Array.from(files as File[]);
188
  return Promise.all(
189
  arr.map(
190
  (file) =>
191
  new Promise<string>((resolve, reject) => {
192
- const r = new FileReader();
193
- r.onload = () => resolve(r.result as string);
194
- r.onerror = reject;
195
- r.readAsDataURL(file);
196
  })
197
  )
198
  );
199
  }
200
 
201
- // Viewport helpers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
  function screenToWorld(
203
  clientX: number,
204
  clientY: number,
@@ -207,7 +433,7 @@ function screenToWorld(
207
  ty: number,
208
  scale: number
209
  ) {
210
- const x = (clientX - container.left - tx) / scale;
211
  const y = (clientY - container.top - ty) / scale;
212
  return { x, y };
213
  }
@@ -363,13 +589,15 @@ function CharacterNodeView({
363
  />
364
  <div className="flex items-center gap-2">
365
  <Button
366
- variant="ghost" size="icon" className="text-destructive"
367
  onClick={(e) => {
368
  e.stopPropagation();
369
- if (confirm('Delete MERGE node?')) {
 
370
  onDelete(node.id);
371
  }
372
  }}
 
373
  title="Delete node"
374
  aria-label="Delete node"
375
  >
@@ -388,8 +616,41 @@ function CharacterNodeView({
388
  <img
389
  src={node.image}
390
  alt="character"
391
- className="h-full w-full object-contain"
392
  draggable={false}
 
 
393
  />
394
  </div>
395
  <div className="flex gap-2">
@@ -478,18 +739,23 @@ function MergeNodeView({
478
  />
479
  <div className="font-semibold tracking-wide text-sm flex-1 text-center">MERGE</div>
480
  <div className="flex items-center gap-2">
481
- <button
482
- className="text-2xl leading-none font-bold text-red-400 hover:text-red-300 opacity-50 hover:opacity-100 transition-all hover:scale-110 px-1"
 
 
483
  onClick={(e) => {
484
  e.stopPropagation();
485
- if (confirm('Delete MERGE node?')) {
 
486
  onDelete(node.id);
487
  }
488
  }}
 
489
  title="Delete node"
 
490
  >
491
  ×
492
- </button>
493
  <Port
494
  className="out"
495
  nodeId={node.id}
@@ -528,7 +794,44 @@ function MergeNodeView({
528
  <div key={id} className="flex items-center gap-2 bg-white/10 rounded px-2 py-1">
529
  {image && (
530
  <div className="w-6 h-6 rounded overflow-hidden bg-black/20">
531
- <img src={image} className="w-full h-full object-contain" alt="inp" />
 
 
532
  </div>
533
  )}
534
  <span className="text-xs">{label}</span>
@@ -566,29 +869,74 @@ function MergeNodeView({
566
  </div>
567
 
568
  <div className="mt-2">
569
- <div className="text-xs text-white/70 mb-1">Output</div>
 
 
570
  <div className="w-full min-h-[200px] max-h-[400px] rounded-xl bg-black/40 grid place-items-center">
571
  {node.output ? (
572
- <img src={node.output} className="w-full h-auto max-h-[400px] object-contain rounded-xl" alt="output" />
 
 
573
  ) : (
574
  <span className="text-white/40 text-xs py-16">Run merge to see result</span>
575
  )}
576
  </div>
577
  {node.output && (
578
- <Button
579
- className="w-full mt-2"
580
- variant="secondary"
581
- onClick={() => {
582
- const link = document.createElement('a');
583
- link.href = node.output as string;
584
- link.download = `merge-${Date.now()}.png`;
585
- document.body.appendChild(link);
586
- link.click();
587
- document.body.removeChild(link);
588
- }}
589
- >
590
- 📥 Download Merged Image
591
- </Button>
 
 
592
  )}
593
  {node.error && (
594
  <div className="mt-2">
@@ -635,13 +983,75 @@ export default function EditorPage() {
635
  scaleRef.current = scale;
636
  }, [scale]);
637
 
 
 
638
  // Connection dragging state
639
  const [draggingFrom, setDraggingFrom] = useState<string | null>(null);
640
  const [dragPos, setDragPos] = useState<{x: number, y: number} | null>(null);
641
 
642
- // API Token state
643
- const [apiToken, setApiToken] = useState<string>("");
644
  const [showHelpSidebar, setShowHelpSidebar] = useState(false);
 
 
 
 
 
645
 
646
  const characters = nodes.filter((n) => n.type === "CHARACTER") as CharacterNode[];
647
  const merges = nodes.filter((n) => n.type === "MERGE") as MergeNode[];
@@ -720,6 +1130,7 @@ export default function EditorPage() {
720
  setNodes((prev) => prev.map((n) => (n.id === id ? { ...n, ...updates } : n)));
721
  };
722
 
 
723
  // Handle single input connections for new nodes
724
  const handleEndSingleConnection = (nodeId: string) => {
725
  if (draggingFrom) {
@@ -778,17 +1189,39 @@ export default function EditorPage() {
778
  };
779
 
780
  // Helper to extract configuration from a node
781
- const getNodeConfiguration = (node: AnyNode): any => {
782
- const config: any = {};
783
 
784
  switch (node.type) {
785
  case "BACKGROUND":
786
  if ((node as BackgroundNode).backgroundType) {
787
- config.backgroundType = (node as BackgroundNode).backgroundType;
788
- config.backgroundColor = (node as BackgroundNode).backgroundColor;
789
- config.backgroundImage = (node as BackgroundNode).backgroundImage;
790
- config.customBackgroundImage = (node as BackgroundNode).customBackgroundImage;
791
- config.customPrompt = (node as BackgroundNode).customPrompt;
 
 
 
 
792
  }
793
  break;
794
  case "CLOTHES":
@@ -821,6 +1254,7 @@ export default function EditorPage() {
821
  if (cam.bokeh && cam.bokeh !== "None") config.bokeh = cam.bokeh;
822
  if (cam.composition && cam.composition !== "None") config.composition = cam.composition;
823
  if (cam.aspectRatio && cam.aspectRatio !== "None") config.aspectRatio = cam.aspectRatio;
 
824
  break;
825
  case "AGE":
826
  if ((node as AgeNode).targetAge) {
@@ -830,7 +1264,7 @@ export default function EditorPage() {
830
  case "FACE":
831
  const face = node as FaceNode;
832
  if (face.faceOptions) {
833
- const opts: any = {};
834
  if (face.faceOptions.removePimples) opts.removePimples = true;
835
  if (face.faceOptions.addSunglasses) opts.addSunglasses = true;
836
  if (face.faceOptions.addHat) opts.addHat = true;
@@ -848,6 +1282,18 @@ export default function EditorPage() {
848
  }
849
  }
850
  break;
 
 
 
851
  }
852
 
853
  return config;
@@ -961,7 +1407,6 @@ export default function EditorPage() {
961
 
962
  // If we found unprocessed merges, we need to execute them first
963
  if (unprocessedMerges.length > 0 && !inputImage) {
964
- console.log(`Found ${unprocessedMerges.length} unprocessed MERGE nodes in chain. Processing them first...`);
965
 
966
  // Process each merge node
967
  for (const merge of unprocessedMerges) {
@@ -1028,10 +1473,7 @@ export default function EditorPage() {
1028
 
1029
  // Show info about batch processing
1030
  if (unprocessedNodeCount > 1) {
1031
- console.log(`🚀 Combining ${unprocessedNodeCount} node transformations into ONE API call`);
1032
- console.log("Combined parameters:", params);
1033
  } else {
1034
- console.log("Processing single node:", node.type);
1035
  }
1036
 
1037
  // Set loading state for all nodes being processed
@@ -1050,7 +1492,6 @@ export default function EditorPage() {
1050
 
1051
  // Check if params contains custom images and validate them
1052
  if (params.clothesImage) {
1053
- console.log("[Process] Clothes image size:", (params.clothesImage.length / 1024).toFixed(2) + "KB");
1054
  // Validate it's a proper data URL
1055
  if (!params.clothesImage.startsWith('data:') && !params.clothesImage.startsWith('http') && !params.clothesImage.startsWith('/')) {
1056
  throw new Error("Invalid clothes image format. Please upload a valid image.");
@@ -1058,7 +1499,6 @@ export default function EditorPage() {
1058
  }
1059
 
1060
  if (params.customBackgroundImage) {
1061
- console.log("[Process] Custom background size:", (params.customBackgroundImage.length / 1024).toFixed(2) + "KB");
1062
  // Validate it's a proper data URL
1063
  if (!params.customBackgroundImage.startsWith('data:') && !params.customBackgroundImage.startsWith('http') && !params.customBackgroundImage.startsWith('/')) {
1064
  throw new Error("Invalid background image format. Please upload a valid image.");
@@ -1066,12 +1506,28 @@ export default function EditorPage() {
1066
  }
1067
 
1068
  // Log request details for debugging
1069
- console.log("[Process] Sending request with:", {
1070
- hasImage: !!inputImage,
1071
- imageSize: inputImage ? (inputImage.length / 1024).toFixed(2) + "KB" : 0,
1072
- paramsKeys: Object.keys(params),
1073
- nodeType: node.type
 
 
1074
  });
 
1075
 
1076
  // Make a SINGLE API call with all accumulated parameters
1077
  const res = await fetch("/api/process", {
@@ -1094,7 +1550,13 @@ export default function EditorPage() {
1094
  }
1095
 
1096
  const data = await res.json();
1097
- if (!res.ok) throw new Error(data.error || "Processing failed");
 
 
 
 
 
 
1098
 
1099
  // Only update the current node with the output
1100
  // Don't show output in intermediate nodes - they were just used for configuration
@@ -1109,10 +1571,14 @@ export default function EditorPage() {
1109
  }
1110
  return n;
1111
  }));
 
 
 
 
 
 
1112
 
1113
  if (unprocessedNodeCount > 1) {
1114
- console.log(`✅ Successfully applied ${unprocessedNodeCount} transformations in ONE API call!`);
1115
- console.log(`Saved ${unprocessedNodeCount - 1} API calls by combining transformations`);
1116
  }
1117
  } catch (e: any) {
1118
  console.error("Process error:", e);
@@ -1232,14 +1698,22 @@ export default function EditorPage() {
1232
  }
1233
 
1234
  // Log merge details for debugging
1235
- console.log("[Merge] Processing merge with:", {
1236
- imageCount: mergeImages.length,
1237
- imageSizes: mergeImages.map(img => (img.length / 1024).toFixed(2) + "KB"),
1238
- labels: inputData.map(d => d.label)
1239
- });
1240
 
1241
  const prompt = generateMergePrompt(inputData);
1242
 
 
 
1243
  // Use the process route instead of merge route
1244
  const res = await fetch("/api/process", {
1245
  method: "POST",
@@ -1307,13 +1781,28 @@ export default function EditorPage() {
1307
  if (inputData.length < 2) throw new Error("Connect at least two nodes with images (CHARACTER nodes or processed nodes).");
1308
 
1309
  // Debug: Log what we're sending
1310
- console.log("🔄 Merging nodes:", inputData.map(d => d.label).join(", "));
1311
- console.log("📷 Image URLs being sent:", inputData.map(d => d.image.substring(0, 100) + "..."));
1312
 
1313
  // Generate dynamic prompt based on number of inputs
1314
  const prompt = generateMergePrompt(inputData);
1315
  const imgs = inputData.map(d => d.image);
1316
 
 
 
 
 
1317
  // Use the process route with MERGE type
1318
  const res = await fetch("/api/process", {
1319
  method: "POST",
@@ -1345,6 +1834,18 @@ export default function EditorPage() {
1345
  }
1346
  const out = js.image || (js.images?.[0] as string) || null;
1347
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, output: out, isRunning: false } : n)));
 
 
 
1348
  } catch (e: any) {
1349
  console.error("Merge error:", e);
1350
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, isRunning: false, error: e?.message || "Error" } : n)));
@@ -1409,7 +1910,7 @@ export default function EditorPage() {
1409
  if (inputNode) {
1410
  const start = getNodeOutputPort(inputNode);
1411
  const end = getNodeInputPort(node);
1412
- const isProcessing = merge.isRunning || (inputNode as any).isRunning;
1413
  paths.push({
1414
  path: createPath(start.x, start.y, end.x, end.y),
1415
  processing: isProcessing
@@ -1423,7 +1924,7 @@ export default function EditorPage() {
1423
  if (inputNode) {
1424
  const start = getNodeOutputPort(inputNode);
1425
  const end = getNodeInputPort(node);
1426
- const isProcessing = (node as any).isRunning || (inputNode as any).isRunning;
1427
  paths.push({
1428
  path: createPath(start.x, start.y, end.x, end.y),
1429
  processing: isProcessing
@@ -1496,7 +1997,30 @@ export default function EditorPage() {
1496
  const rect = containerRef.current!.getBoundingClientRect();
1497
  const world = screenToWorld(e.clientX, e.clientY, rect, tx, ty, scale);
1498
  setMenuWorld(world);
1499
- setMenuPos({ x: e.clientX - rect.left, y: e.clientY - rect.top });
 
 
1500
  setMenuOpen(true);
1501
  };
1502
 
@@ -1535,6 +2059,15 @@ export default function EditorPage() {
1535
  case "FACE":
1536
  setNodes(prev => [...prev, { ...commonProps, type: "FACE", faceOptions: {} } as FaceNode]);
1537
  break;
 
 
 
1538
  }
1539
  setMenuOpen(false);
1540
  };
@@ -1545,7 +2078,8 @@ export default function EditorPage() {
1545
  <h1 className="text-lg font-semibold tracking-wide">
1546
  <span className="mr-2" aria-hidden>🍌</span>Nano Banana Editor
1547
  </h1>
1548
- <div className="flex items-center gap-2">
 
1549
  <label htmlFor="api-token" className="text-sm font-medium text-muted-foreground">
1550
  API Token:
1551
  </label>
@@ -1557,15 +2091,18 @@ export default function EditorPage() {
1557
  onChange={(e) => setApiToken(e.target.value)}
1558
  className="w-64"
1559
  />
 
1560
  <Button
1561
- variant="ghost"
1562
  size="sm"
1563
- className="h-8 w-8 p-0 rounded-full hover:bg-red-50 dark:hover:bg-red-900/20"
1564
  type="button"
1565
  onClick={() => setShowHelpSidebar(true)}
1566
  >
1567
- <span className="text-sm font-medium text-red-500 hover:text-red-600">?</span>
1568
  </Button>
 
 
1569
  </div>
1570
  </header>
1571
 
@@ -1593,6 +2130,27 @@ export default function EditorPage() {
1593
  </div>
1594
 
1595
  <div className="space-y-6">
 
 
1596
  <div>
1597
  <h3 className="font-semibold mb-3 text-foreground">🔑 API Token Setup</h3>
1598
  <div className="text-sm text-muted-foreground space-y-3">
@@ -1626,6 +2184,13 @@ export default function EditorPage() {
1626
  <div className="p-4 bg-muted border border-border rounded-lg">
1627
  <h4 className="font-semibold text-foreground mb-2">🔒 Privacy & Security</h4>
1628
  <div className="text-sm text-muted-foreground space-y-1">
 
1629
  <p>• Your API token is stored locally in your browser</p>
1630
  <p>• Tokens are never sent to our servers</p>
1631
  <p>• Keep your API key secure and don't share it</p>
@@ -1827,6 +2392,32 @@ export default function EditorPage() {
1827
  onUpdatePosition={updateNodePosition}
1828
  />
1829
  );
 
 
 
 
1830
  default:
1831
  return null;
1832
  }
@@ -1841,7 +2432,10 @@ export default function EditorPage() {
1841
  onMouseLeave={() => setMenuOpen(false)}
1842
  >
1843
  <div className="px-3 py-2 text-xs text-white/60">Add node</div>
1844
- <div className="max-h-[400px] overflow-y-auto">
 
 
 
1845
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CHARACTER")}>CHARACTER</button>
1846
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("MERGE")}>MERGE</button>
1847
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("BACKGROUND")}>BACKGROUND</button>
@@ -1851,6 +2445,8 @@ export default function EditorPage() {
1851
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CAMERA")}>CAMERA</button>
1852
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("AGE")}>AGE</button>
1853
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("FACE")}>FACE</button>
 
 
1854
  </div>
1855
  </div>
1856
  )}
 
1
+ /**
2
+ * NANO BANANA EDITOR - MAIN APPLICATION COMPONENT
3
+ *
4
+ * This is a visual node-based editor for AI image processing.
5
+ * Users can create nodes for different operations like merging images,
6
+ * changing backgrounds, adding clothes, applying styles, etc.
7
+ *
8
+ * Key Features:
9
+ * - Drag & drop interface for connecting nodes
10
+ * - Real-time image processing using Google's Gemini API
11
+ * - Support for multiple image operations (merge, style, edit, etc.)
12
+ * - Visual connection lines with animations
13
+ * - Viewport controls (pan, zoom)
14
+ */
15
  "use client";
16
 
17
+ // React imports for hooks and core functionality
18
  import React, { useEffect, useMemo, useRef, useState } from "react";
19
+ // Custom CSS for animations and styling
20
  import "./editor.css";
21
+ // Import all the different node view components
22
  import {
23
+ BackgroundNodeView, // Changes/generates backgrounds
24
+ ClothesNodeView, // Adds/changes clothing
25
+ StyleNodeView, // Applies artistic styles
26
+ EditNodeView, // General text-based editing
27
+ CameraNodeView, // Camera effects and settings
28
+ AgeNodeView, // Age transformation
29
+ FaceNodeView, // Face modifications
30
+ LightningNodeView, // Lighting effects
31
+ PosesNodeView // Pose modifications
32
  } from "./nodes";
33
+ // UI components from shadcn/ui library
34
  import { Button } from "../components/ui/button";
35
  import { Input } from "../components/ui/input";
36
+ // Hugging Face OAuth functionality
37
+ import { oauthLoginUrl, oauthHandleRedirectIfPresent } from '@huggingface/hub';
38
+
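+ // Login flow sketch (illustrative; option defaults per the @huggingface/hub docs):
+ // window.location.href = await oauthLoginUrl();       // redirect to the HF consent page
+ // const oauth = await oauthHandleRedirectIfPresent(); // on return: false or a result
+ // if (oauth) { /* oauth.accessToken holds the user's HF token */ }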
39
+ /**
40
+ * Utility function to combine CSS class names conditionally
41
+ * Filters out falsy values and joins the remaining strings with spaces
42
+ * Example: cx("class1", condition && "class2", null) => "class1 class2" or "class1"
43
+ */
44
  function cx(...args: Array<string | false | null | undefined>) {
45
  return args.filter(Boolean).join(" ");
46
  }
47
 
48
+ /**
49
+ * Generate a unique ID for new nodes
50
+ * Uses Math.random() to create a random string identifier
51
+ * Format: random base-36 string (letters + numbers), 7 characters long
52
+ */
53
  const uid = () => Math.random().toString(36).slice(2, 9);
54
 
55
+ /**
56
+ * Generate AI prompt for merging multiple character images into a single cohesive group photo
57
+ *
58
+ * This function creates a detailed prompt that instructs the AI model to:
59
+ * 1. Extract people from separate images
60
+ * 2. Combine them naturally as if photographed together
61
+ * 3. Ensure consistent lighting, shadows, and perspective
62
+ * 4. Create a believable group composition
63
+ *
64
+ * @param characterData Array of objects containing image data and labels
65
+ * @returns Detailed prompt string for the AI merge operation
66
+ */
67
  function generateMergePrompt(characterData: { image: string; label: string }[]): string {
68
  const count = characterData.length;
69
 
70
+ // Create a summary of all images being processed
71
  const labels = characterData.map((d, i) => `Image ${i + 1} (${d.label})`).join(", ");
72
 
73
+ // Return comprehensive prompt with specific instructions for natural-looking merge
74
  return `MERGE TASK: Create a natural, cohesive group photo combining ALL subjects from ${count} provided images.
75
 
76
  Images provided:
 
101
  The result should look like all subjects were photographed together in the same place at the same time, NOT like separate images placed side by side.`;
102
  }
103
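+ // Usage sketch (labels and variables are hypothetical):
+ // const prompt = generateMergePrompt([
+ //   { image: aliceDataUrl, label: "Character 1" },
+ //   { image: bobDataUrl, label: "Character 2" },
+ // ]);
+ // => "MERGE TASK: Create a natural, cohesive group photo combining ALL subjects
+ //     from 2 provided images. ..."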
 
104
+ /**
105
+ * Copy image to clipboard with PNG conversion
106
+ * The clipboard API only supports PNG format for images, so we convert other formats
107
+ */
108
+ async function copyImageToClipboard(dataUrl: string) {
109
+ try {
110
+ const response = await fetch(dataUrl);
111
+ const blob = await response.blob();
112
+
113
+ // Convert to PNG if not already PNG
114
+ if (blob.type !== 'image/png') {
115
+ const canvas = document.createElement('canvas');
116
+ const ctx = canvas.getContext('2d');
117
+ const img = new Image();
118
+
119
+ await new Promise((resolve) => {
120
+ img.onload = () => {
121
+ canvas.width = img.width;
122
+ canvas.height = img.height;
123
+ ctx?.drawImage(img, 0, 0);
124
+ resolve(void 0);
125
+ };
126
+ img.src = dataUrl;
127
+ });
128
+
129
+ const pngBlob = await new Promise<Blob>((resolve) => {
130
+ canvas.toBlob((blob) => resolve(blob!), 'image/png');
131
+ });
132
+
133
+ await navigator.clipboard.write([
134
+ new ClipboardItem({ 'image/png': pngBlob })
135
+ ]);
136
+ } else {
137
+ await navigator.clipboard.write([
138
+ new ClipboardItem({ 'image/png': blob })
139
+ ]);
140
+ }
141
+ } catch (error) {
142
+ console.error('Failed to copy image to clipboard:', error);
143
+ }
144
+ }
145
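+ // Usage sketch (illustrative): call from a click handler, since the async
+ // Clipboard API requires a user gesture in most browsers, e.g.
+ // <img onClick={() => copyImageToClipboard(node.output!)} ... />
+ // Note: the inline copy handlers further below pass blob.type straight to
+ // ClipboardItem; routing them through this helper avoids failures for
+ // non-PNG blobs, which many browsers reject.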
+
146
+ /* ========================================
147
+ TYPE DEFINITIONS
148
+ ======================================== */
149
+
150
+ /**
151
+ * All possible node types in the editor
152
+ * Each type represents a different kind of image processing operation
153
+ */
154
+ type NodeType = "CHARACTER" | "MERGE" | "BACKGROUND" | "CLOTHES" | "STYLE" | "EDIT" | "CAMERA" | "AGE" | "FACE" | "BLEND" | "LIGHTNING" | "POSES";
155
 
156
+ /**
157
+ * Base properties that all nodes share
158
+ * Every node has an ID, type, and position in the editor world space
159
+ */
160
  type NodeBase = {
161
+ id: string; // Unique identifier for the node
162
+ type: NodeType; // What kind of operation this node performs
163
+ x: number; // X position in world coordinates (not screen pixels)
164
+ y: number; // Y position in world coordinates (not screen pixels)
165
  };
166
 
167
+ /**
168
+ * CHARACTER node - Contains source images (people/subjects)
169
+ * These are the starting points for most image processing workflows
170
+ * Users can upload images or paste URLs/data URLs
171
+ */
172
  type CharacterNode = NodeBase & {
173
  type: "CHARACTER";
174
+ image: string; // Image data (data URL, http URL, or file path)
175
+ label?: string; // Optional human-readable name for the character
176
  };
177
 
178
+ /**
179
+ * MERGE node - Combines multiple inputs into a single group photo
180
+ * Takes multiple CHARACTER or processed nodes and creates a cohesive image
181
+ * Uses AI to naturally blend subjects together with consistent lighting
182
+ */
183
  type MergeNode = NodeBase & {
184
  type: "MERGE";
185
+ inputs: string[]; // Array of node IDs to merge together
186
+ output?: string | null; // Resulting merged image (data URL)
187
+ isRunning?: boolean; // Whether merge operation is currently processing
188
+ error?: string | null; // Error message if merge failed
189
  };
190
 
191
+ /**
192
+ * BACKGROUND node - Changes or generates backgrounds
193
+ * Can use solid colors, preset images, uploaded custom images, or AI-generated backgrounds
194
+ */
195
  type BackgroundNode = NodeBase & {
196
  type: "BACKGROUND";
197
+ input?: string; // ID of the source node (usually CHARACTER)
198
+ output?: string; // Processed image with new background
199
+ backgroundType: "color" | "gradient" | "image" | "city" | "photostudio" | "upload" | "custom"; // Type of background to apply
200
+ backgroundColor?: string; // Hex color code for solid color backgrounds
201
+
202
+ // Gradient background properties
203
+ gradientDirection?: string; // Direction of gradient (to right, to bottom, radial, etc.)
204
+ gradientStartColor?: string; // Starting color of gradient
205
+ gradientEndColor?: string; // Ending color of gradient
206
+
207
+ backgroundImage?: string; // URL/path for preset background images
208
+
209
+ // City scene properties
210
+ citySceneType?: string; // Type of city scene (busy_street, times_square, etc.)
211
+ cityTimeOfDay?: string; // Time of day for city scene
212
+
213
+ // Photo studio properties
214
+ studioSetup?: string; // Studio background setup type
215
+ studioBackgroundColor?: string; // Color for colored seamless background
216
+ studioLighting?: string; // Studio lighting setup
217
+ faceCamera?: boolean; // Whether to position character facing camera
218
+
219
+ customBackgroundImage?: string; // User-uploaded background image data
220
+ customPrompt?: string; // AI prompt for generating custom backgrounds
221
+ isRunning?: boolean; // Processing state indicator
222
+ error?: string | null; // Error message if processing failed
223
  };
224
 
225
+ /**
226
+ * CLOTHES node - Adds or changes clothing on subjects
227
+ * Can use preset clothing styles or custom uploaded clothing images
228
+ */
229
  type ClothesNode = NodeBase & {
230
  type: "CLOTHES";
231
+ input?: string; // ID of the source node
232
+ output?: string; // Image with modified clothing
233
+ clothesImage?: string; // Custom clothing image to apply
234
+ selectedPreset?: string; // Preset clothing style identifier
235
+ clothesPrompt?: string; // Text description for clothing changes
236
+ isRunning?: boolean; // Processing state
237
+ error?: string | null; // Error message
238
  };
239
 
240
+ /**
241
+ * STYLE node - Applies artistic styles and filters
242
+ * Uses AI to transform images with different artistic styles (oil painting, watercolor, etc.)
243
+ */
244
  type StyleNode = NodeBase & {
245
  type: "STYLE";
246
+ input?: string; // Source node ID
247
+ output?: string; // Styled output image
248
+ stylePreset?: string; // Selected artistic style
249
+ styleStrength?: number; // How strongly to apply the style (0-100)
250
+ isRunning?: boolean; // Processing indicator
251
+ error?: string | null; // Error message
252
  };
253
 
254
+ /**
255
+ * EDIT node - General purpose text-based image editing
256
+ * Uses natural language prompts to make specific changes to images
257
+ */
258
  type EditNode = NodeBase & {
259
  type: "EDIT";
260
+ input?: string; // Input node ID
261
+ output?: string; // Edited output image
262
+ editPrompt?: string; // Natural language description of desired changes
263
+ isRunning?: boolean; // Whether edit is being processed
264
+ error?: string | null; // Error if edit failed
265
  };
266
 
267
+ /**
268
+ * CAMERA node - Applies camera effects and photographic settings
269
+ * Simulates different camera settings, lenses, and photographic techniques
270
+ */
271
  type CameraNode = NodeBase & {
272
  type: "CAMERA";
273
+ input?: string; // Source image node ID
274
+ output?: string; // Image with camera effects applied
275
+ focalLength?: string; // Lens focal length (e.g., "50mm", "85mm")
276
+ aperture?: string; // Aperture setting (e.g., "f/1.4", "f/2.8")
277
+ shutterSpeed?: string; // Shutter speed (e.g., "1/60", "1/125")
278
+ whiteBalance?: string; // Color temperature setting
279
+ angle?: string; // Camera angle/perspective
280
+ iso?: string; // ISO sensitivity setting
281
+ filmStyle?: string; // Film simulation (e.g., "Kodak", "Fuji")
282
+ lighting?: string; // Lighting setup description
283
+ bokeh?: string; // Background blur style
284
+ composition?: string; // Composition technique
285
+ aspectRatio?: string; // Image aspect ratio
286
+ motionBlur?: string; // Motion blur effect
287
+ isRunning?: boolean; // Processing status
288
+ error?: string | null; // Error message
289
  };
290
 
291
+ /**
292
+ * AGE node - Transforms subject age
293
+ * Uses AI to make people appear older or younger while maintaining their identity
294
+ */
295
  type AgeNode = NodeBase & {
296
  type: "AGE";
297
+ input?: string; // Input node ID
298
+ output?: string; // Age-transformed image
299
+ targetAge?: number; // Target age to transform to (in years)
300
+ isRunning?: boolean; // Processing indicator
301
+ error?: string | null; // Error if transformation failed
302
  };
303
 
304
+ /**
305
+ * FACE node - Modifies facial features and accessories
306
+ * Can add/remove facial hair, accessories, change expressions, etc.
307
+ */
308
  type FaceNode = NodeBase & {
309
  type: "FACE";
310
+ input?: string; // Source node ID
311
+ output?: string; // Modified face image
312
+ faceOptions?: { // Collection of face modification options
313
+ removePimples?: boolean; // Clean up skin blemishes
314
+ addSunglasses?: boolean; // Add sunglasses accessory
315
+ addHat?: boolean; // Add hat accessory
316
+ changeHairstyle?: string; // New hairstyle description
317
+ facialExpression?: string; // Change facial expression
318
+ beardStyle?: string; // Add/modify facial hair
319
+ selectedMakeup?: string; // Selected makeup style
320
+ makeupImage?: string; // Path to makeup reference image
321
  };
322
+ isRunning?: boolean; // Processing state
323
+ error?: string | null; // Error message
324
  };
325
 
326
+ /**
327
+ * BLEND node - Blends/composites images with adjustable opacity
328
+ * Used for subtle image combinations and overlay effects
329
+ */
330
  type BlendNode = NodeBase & {
331
  type: "BLEND";
332
+ input?: string; // Primary input node ID
333
+ output?: string; // Blended output image
334
+ blendStrength?: number; // Blend intensity (0-100 percent)
335
+ isRunning?: boolean; // Processing indicator
336
+ error?: string | null; // Error message
337
  };
338
 
339
+ /**
340
+ * LIGHTNING node - Applies lighting effects to images
341
+ * Uses preset lighting styles and images for realistic lighting effects
342
+ */
343
+ type LightningNode = NodeBase & {
344
+ type: "LIGHTNING";
345
+ input?: string; // Source node ID
346
+ output?: string; // Image with lighting applied
347
+ selectedLighting?: string; // Selected lighting preset name
348
+ lightingPrompt?: string; // Text prompt for lighting effect
349
+ lightingStrength?: number; // Intensity of lighting effect (0-100)
350
+ isRunning?: boolean; // Processing state
351
+ error?: string | null; // Error message
352
+ };
353
 
 
 
355
+ /**
356
+ * POSES node - Applies pose modifications to subjects
357
+ * Uses preset pose images to modify subject poses
358
+ */
359
+ type PosesNode = NodeBase & {
360
+ type: "POSES";
361
+ input?: string; // Source node ID
362
+ output?: string; // Image with pose applied
363
+ selectedPose?: string; // Selected pose preset name
364
+ posePrompt?: string; // Text prompt for pose effect
365
+ poseStrength?: number; // How strongly to apply the pose (0-100)
366
+ isRunning?: boolean; // Processing state
367
+ error?: string | null; // Error message
368
+ };
369
+
370
+ /**
371
+ * Union type of all possible node types
372
+ * Used for type-safe handling of nodes throughout the application
373
+ */
374
+ type AnyNode = CharacterNode | MergeNode | BackgroundNode | ClothesNode | StyleNode | EditNode | CameraNode | AgeNode | FaceNode | BlendNode | LightningNode | PosesNode;
375
+
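+ // Narrowing sketch (illustrative): checking the "type" discriminant narrows the
+ // union, e.g. if (node.type === "MERGE") { node.inputs /* string[] on MergeNode */ }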
376
+ /* ========================================
377
+ CONSTANTS AND UTILITY FUNCTIONS
378
+ ======================================== */
379
+
380
+ /**
381
+ * Default placeholder image for new CHARACTER nodes
382
+ * Uses the bundled public asset /reo.png as a starting point before users upload their own images
383
+ */
384
+ const DEFAULT_PERSON = "/reo.png";
385
+
386
+ /**
387
+ * Convert File objects to data URLs for image processing
388
+ *
389
+ * Takes a FileList or array of File objects (from drag/drop or file input)
390
+ * and converts each file to a base64 data URL that can be used in img tags
391
+ * or sent to APIs for processing.
392
+ *
393
+ * @param files FileList or File array from input events
394
+ * @returns Promise that resolves to array of data URL strings
395
+ */
396
  function toDataUrls(files: FileList | File[]): Promise<string[]> {
397
+ const arr = Array.from(files as File[]); // Convert FileList to regular array
398
  return Promise.all(
399
  arr.map(
400
  (file) =>
401
  new Promise<string>((resolve, reject) => {
402
+ const r = new FileReader(); // Browser API for reading files
403
+ r.onload = () => resolve(r.result as string); // Success: return data URL
404
+ r.onerror = reject; // Error: reject promise
405
+ r.readAsDataURL(file); // Start reading as base64 data URL
406
  })
407
  )
408
  );
409
  }
410
 
411
+ /**
412
+ * Convert screen pixel coordinates to world coordinates
413
+ *
414
+ * The editor uses a coordinate system where:
415
+ * - Screen coordinates: actual pixel positions on the browser window
416
+ * - World coordinates: virtual positions that account for pan/zoom transformations
417
+ *
418
+ * This function converts mouse/touch positions to world space for accurate node positioning.
419
+ *
420
+ * @param clientX Mouse X position in screen pixels
421
+ * @param clientY Mouse Y position in screen pixels
422
+ * @param container Bounding rect of the editor container
423
+ * @param tx Current pan transform X offset
424
+ * @param ty Current pan transform Y offset
425
+ * @param scale Current zoom scale factor
426
+ * @returns Object with world coordinates {x, y}
427
+ */
428
  function screenToWorld(
429
  clientX: number,
430
  clientY: number,
 
433
  ty: number,
434
  scale: number
435
  ) {
436
+ const x = (clientX - container.left - tx) / scale; // Account for container offset, pan, and zoom
437
  const y = (clientY - container.top - ty) / scale;
438
  return { x, y };
439
  }
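+ // Inverse mapping (sketch derived from the formula above; not defined in this file):
+ // screenX = worldX * scale + tx + container.left;
+ // screenY = worldY * scale + ty + container.top;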
 
589
  />
590
  <div className="flex items-center gap-2">
591
  <Button
592
+ variant="ghost" size="icon" className="text-destructive hover:bg-destructive/20 h-6 w-6"
593
  onClick={(e) => {
594
  e.stopPropagation();
595
+ e.preventDefault();
596
+ if (confirm('Delete this character node?')) {
597
  onDelete(node.id);
598
  }
599
  }}
600
+ onPointerDown={(e) => e.stopPropagation()}
601
  title="Delete node"
602
  aria-label="Delete node"
603
  >
 
616
  <img
617
  src={node.image}
618
  alt="character"
619
+ className="h-full w-full object-contain cursor-pointer hover:opacity-80 transition-opacity"
620
  draggable={false}
621
+ onClick={async () => {
622
+ try {
623
+ const response = await fetch(node.image);
624
+ const blob = await response.blob();
625
+ await navigator.clipboard.write([
626
+ new ClipboardItem({ [blob.type]: blob })
627
+ ]);
628
+ } catch (error) {
629
+ console.error('Failed to copy image:', error);
630
+ }
631
+ }}
632
+ onContextMenu={async (e) => {
633
+ e.preventDefault();
634
+ try {
635
+ const response = await fetch(node.image);
636
+ const blob = await response.blob();
637
+ await navigator.clipboard.write([
638
+ new ClipboardItem({ [blob.type]: blob })
639
+ ]);
640
+
641
+ // Show visual feedback
642
+ const img = e.currentTarget;
643
+ const originalFilter = img.style.filter;
644
+ img.style.filter = "brightness(1.2)";
645
+
646
+ setTimeout(() => {
647
+ img.style.filter = originalFilter;
648
+ }, 500);
649
+ } catch (error) {
650
+ console.error('Failed to copy image:', error);
651
+ }
652
+ }}
653
+ title="Click or right-click to copy image to clipboard"
654
  />
655
  </div>
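The fetch → blob → ClipboardItem sequence above is repeated verbatim in several handlers below; a hypothetical extraction (helper name assumed, not in this commit) would deduplicate it. Note that some browsers accept only image/png payloads in ClipboardItem, so non-PNG sources may need conversion first:

async function copyImageToClipboard(src: string): Promise<void> {
  // Works for data: URLs, http(s) URLs, and same-origin paths like /reo.png
  const blob = await (await fetch(src)).blob();
  await navigator.clipboard.write([new ClipboardItem({ [blob.type]: blob })]);
}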
656
  <div className="flex gap-2">
 
739
  />
740
  <div className="font-semibold tracking-wide text-sm flex-1 text-center">MERGE</div>
741
  <div className="flex items-center gap-2">
742
+ <Button
743
+ variant="ghost"
744
+ size="icon"
745
+ className="text-destructive hover:bg-destructive/20 h-6 w-6"
746
  onClick={(e) => {
747
  e.stopPropagation();
748
+ e.preventDefault();
749
+ if (confirm('Delete this merge node?')) {
750
  onDelete(node.id);
751
  }
752
  }}
753
+ onPointerDown={(e) => e.stopPropagation()}
754
  title="Delete node"
755
+ aria-label="Delete node"
756
  >
757
  ×
758
+ </Button>
759
  <Port
760
  className="out"
761
  nodeId={node.id}
 
794
  <div key={id} className="flex items-center gap-2 bg-white/10 rounded px-2 py-1">
795
  {image && (
796
  <div className="w-6 h-6 rounded overflow-hidden bg-black/20">
797
+ <img
798
+ src={image}
799
+ className="w-full h-full object-contain cursor-pointer hover:opacity-80"
800
+ alt="inp"
801
+ onClick={async () => {
802
+ try {
803
+ const response = await fetch(image);
804
+ const blob = await response.blob();
805
+ await navigator.clipboard.write([
806
+ new ClipboardItem({ [blob.type]: blob })
807
+ ]);
808
+ } catch (error) {
809
+ console.error('Failed to copy image:', error);
810
+ }
811
+ }}
812
+ onContextMenu={async (e) => {
813
+ e.preventDefault();
814
+ try {
815
+ const response = await fetch(image);
816
+ const blob = await response.blob();
817
+ await navigator.clipboard.write([
818
+ new ClipboardItem({ [blob.type]: blob })
819
+ ]);
820
+
821
+ // Show visual feedback
822
+ const img = e.currentTarget;
823
+ const originalFilter = img.style.filter;
824
+ img.style.filter = "brightness(1.2)";
825
+
826
+ setTimeout(() => {
827
+ img.style.filter = originalFilter;
828
+ }, 300);
829
+ } catch (error) {
830
+ console.error('Failed to copy image:', error);
831
+ }
832
+ }}
833
+ title="Click or right-click to copy"
834
+ />
835
  </div>
836
  )}
837
  <span className="text-xs">{label}</span>
 
869
  </div>
870
 
871
  <div className="mt-2">
872
+ <div className="flex items-center justify-between mb-1">
873
+ <div className="text-xs text-white/70">Output</div>
874
+ </div>
875
  <div className="w-full min-h-[200px] max-h-[400px] rounded-xl bg-black/40 grid place-items-center">
876
  {node.output ? (
877
+ <img
878
+ src={node.output}
879
+ className="w-full h-auto max-h-[400px] object-contain rounded-xl cursor-pointer hover:opacity-80 transition-opacity"
880
+ alt="output"
881
+ onClick={async () => {
882
+ if (node.output) {
883
+ try {
884
+ const response = await fetch(node.output);
885
+ const blob = await response.blob();
886
+ await navigator.clipboard.write([
887
+ new ClipboardItem({ [blob.type]: blob })
888
+ ]);
889
+ } catch (error) {
890
+ console.error('Failed to copy image:', error);
891
+ }
892
+ }
893
+ }}
894
+ onContextMenu={async (e) => {
895
+ e.preventDefault();
896
+ if (node.output) {
897
+ try {
898
+ const response = await fetch(node.output);
899
+ const blob = await response.blob();
900
+ await navigator.clipboard.write([
901
+ new ClipboardItem({ [blob.type]: blob })
902
+ ]);
903
+
904
+ // Show visual feedback
905
+ const img = e.currentTarget;
906
+ const originalFilter = img.style.filter;
907
+ img.style.filter = "brightness(1.2)";
908
+
909
+ setTimeout(() => {
910
+ img.style.filter = originalFilter;
911
+ }, 500);
912
+ } catch (error) {
913
+ console.error('Failed to copy image:', error);
914
+ }
915
+ }
916
+ }}
917
+ title="Click or right-click to copy image to clipboard"
918
+ />
919
  ) : (
920
  <span className="text-white/40 text-xs py-16">Run merge to see result</span>
921
  )}
922
  </div>
923
  {node.output && (
924
+ <div className="mt-2">
925
+ <Button
926
+ className="w-full"
927
+ variant="secondary"
928
+ onClick={() => {
929
+ const link = document.createElement('a');
930
+ link.href = node.output as string;
931
+ link.download = `merge-${Date.now()}.png`;
932
+ document.body.appendChild(link);
933
+ link.click();
934
+ document.body.removeChild(link);
935
+ }}
936
+ >
937
+ 📥 Download Merged Image
938
+ </Button>
939
+ </div>
940
  )}
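The anchor-element download trick above could be factored out the same way; a hypothetical sketch (not in this commit):

function downloadImage(src: string, filename = `merge-${Date.now()}.png`) {
  const link = document.createElement("a");
  link.href = src;
  link.download = filename; // honored for data: URLs and same-origin files
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
}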
941
  {node.error && (
942
  <div className="mt-2">
 
983
  scaleRef.current = scale;
984
  }, [scale]);
985
 
986
+ // HF OAUTH CHECK
987
+ useEffect(() => {
988
+ (async () => {
989
+ setIsCheckingAuth(true);
990
+ try {
991
+ // Handle OAuth redirect if present
992
+ const oauth = await oauthHandleRedirectIfPresent();
993
+ if (oauth) {
994
+ // Store the token server-side
995
+ await fetch('/api/auth/callback', {
996
+ method: 'POST',
997
+ body: JSON.stringify({ hf_token: oauth.accessToken }),
998
+ headers: { 'Content-Type': 'application/json' }
999
+ });
1000
+ setIsHfProLoggedIn(true);
1001
+ } else {
1002
+ // Check if already logged in
1003
+ const response = await fetch('/api/auth/callback', { method: 'GET' });
1004
+ if (response.ok) {
1005
+ const data = await response.json();
1006
+ setIsHfProLoggedIn(data.isLoggedIn);
1007
+ }
1008
+ }
1009
+ } catch (error) {
1010
+ console.error('OAuth error:', error);
1011
+ } finally {
1012
+ setIsCheckingAuth(false);
1013
+ }
1014
+ })();
1015
+ }, []);
1016
+
1017
+ // HF PRO LOGIN HANDLER
1018
+ const handleHfProLogin = async () => {
1019
+ if (isHfProLoggedIn) {
1020
+ // Logout: clear the token
1021
+ try {
1022
+ await fetch('/api/auth/callback', { method: 'DELETE' });
1023
+ setIsHfProLoggedIn(false);
1024
+ } catch (error) {
1025
+ console.error('Logout error:', error);
1026
+ }
1027
+ } else {
1028
+ // Login with HF OAuth
1029
+ const clientId = process.env.NEXT_PUBLIC_OAUTH_CLIENT_ID;
1030
+ if (!clientId) {
1031
+ console.error('OAuth client ID not configured');
1032
+ alert('OAuth client ID not configured. Please check environment variables.');
1033
+ return;
1034
+ }
1035
+
1036
+ window.location.href = await oauthLoginUrl({
1037
+ clientId,
1038
+ redirectUrl: `${window.location.origin}/api/auth/callback`
1039
+ });
1040
+ }
1041
+ };
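One setup assumption worth making explicit (verify against your HF OAuth app settings; this is not stated in the commit):

// Assumed environment wiring (illustrative):
//   NEXT_PUBLIC_OAUTH_CLIENT_ID=<client id of your HF OAuth app>
// The redirectUrl passed to oauthLoginUrl must match a redirect URI
// registered for that app, here `${window.location.origin}/api/auth/callback`.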
1042
+
1043
  // Connection dragging state
1044
  const [draggingFrom, setDraggingFrom] = useState<string | null>(null);
1045
  const [dragPos, setDragPos] = useState<{x: number, y: number} | null>(null);
1046
 
1047
+ // API Token state (restored for manual review)
1048
+ const [apiToken, setApiToken] = useState("");
1049
  const [showHelpSidebar, setShowHelpSidebar] = useState(false);
1050
+
1051
+ // HF PRO AUTHENTICATION
1052
+ const [isHfProLoggedIn, setIsHfProLoggedIn] = useState(false);
1053
+ const [isCheckingAuth, setIsCheckingAuth] = useState(true);
1054
+
1055
 
1056
  const characters = nodes.filter((n) => n.type === "CHARACTER") as CharacterNode[];
1057
  const merges = nodes.filter((n) => n.type === "MERGE") as MergeNode[];
 
1130
  setNodes((prev) => prev.map((n) => (n.id === id ? { ...n, ...updates } : n)));
1131
  };
1132
 
1133
+
1134
  // Handle single input connections for new nodes
1135
  const handleEndSingleConnection = (nodeId: string) => {
1136
  if (draggingFrom) {
 
1189
  };
1190
 
1191
  // Helper to extract configuration from a node
1192
+ const getNodeConfiguration = (node: AnyNode): Record<string, unknown> => {
1193
+ const config: Record<string, unknown> = {};
1194
 
1195
  switch (node.type) {
1196
  case "BACKGROUND":
1197
  if ((node as BackgroundNode).backgroundType) {
1198
+ const bgNode = node as BackgroundNode;
1199
+ config.backgroundType = bgNode.backgroundType;
1200
+ config.backgroundColor = bgNode.backgroundColor;
1201
+ config.backgroundImage = bgNode.backgroundImage;
1202
+ config.customBackgroundImage = bgNode.customBackgroundImage;
1203
+ config.customPrompt = bgNode.customPrompt;
1204
+
1205
+ // Gradient properties
1206
+ if (bgNode.backgroundType === "gradient") {
1207
+ config.gradientDirection = bgNode.gradientDirection;
1208
+ config.gradientStartColor = bgNode.gradientStartColor;
1209
+ config.gradientEndColor = bgNode.gradientEndColor;
1210
+ }
1211
+
1212
+ // City scene properties
1213
+ if (bgNode.backgroundType === "city") {
1214
+ config.citySceneType = bgNode.citySceneType;
1215
+ config.cityTimeOfDay = bgNode.cityTimeOfDay;
1216
+ }
1217
+
1218
+ // Photo studio properties
1219
+ if (bgNode.backgroundType === "photostudio") {
1220
+ config.studioSetup = bgNode.studioSetup;
1221
+ config.studioBackgroundColor = bgNode.studioBackgroundColor;
1222
+ config.studioLighting = bgNode.studioLighting;
1223
+ config.faceCamera = bgNode.faceCamera;
1224
+ }
1225
  }
1226
  break;
1227
  case "CLOTHES":
 
1254
  if (cam.bokeh && cam.bokeh !== "None") config.bokeh = cam.bokeh;
1255
  if (cam.composition && cam.composition !== "None") config.composition = cam.composition;
1256
  if (cam.aspectRatio && cam.aspectRatio !== "None") config.aspectRatio = cam.aspectRatio;
1257
+ if (cam.motionBlur && cam.motionBlur !== "None") config.motionBlur = cam.motionBlur;
1258
  break;
1259
  case "AGE":
1260
  if ((node as AgeNode).targetAge) {
 
1264
  case "FACE":
1265
  const face = node as FaceNode;
1266
  if (face.faceOptions) {
1267
+ const opts: Record<string, unknown> = {};
1268
  if (face.faceOptions.removePimples) opts.removePimples = true;
1269
  if (face.faceOptions.addSunglasses) opts.addSunglasses = true;
1270
  if (face.faceOptions.addHat) opts.addHat = true;
 
1282
  }
1283
  }
1284
  break;
1285
+ case "LIGHTNING":
1286
+ if ((node as LightningNode).lightingPrompt && (node as LightningNode).selectedLighting) {
1287
+ config.lightingPrompt = (node as LightningNode).lightingPrompt;
1288
+ config.selectedLighting = (node as LightningNode).selectedLighting;
1289
+ }
1290
+ break;
1291
+ case "POSES":
1292
+ if ((node as PosesNode).posePrompt && (node as PosesNode).selectedPose) {
1293
+ config.posePrompt = (node as PosesNode).posePrompt;
1294
+ config.selectedPose = (node as PosesNode).selectedPose;
1295
+ }
1296
+ break;
1297
  }
1298
 
1299
  return config;
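An illustrative call (field values are invented for the example; the shape follows the LIGHTNING case above):

const cfg = getNodeConfiguration({
  type: "LIGHTNING",
  lightingPrompt: "soft golden hour light",
  selectedLighting: "light1",
} as LightningNode);
// cfg → { lightingPrompt: "soft golden hour light", selectedLighting: "light1" }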
 
1407
 
1408
  // If we found unprocessed merges, we need to execute them first
1409
  if (unprocessedMerges.length > 0 && !inputImage) {
 
1410
 
1411
  // Process each merge node
1412
  for (const merge of unprocessedMerges) {
 
1473
 
1474
  // Show info about batch processing
1475
  if (unprocessedNodeCount > 1) {
 
 
1476
  } else {
 
1477
  }
1478
 
1479
  // Set loading state for all nodes being processed
 
1492
 
1493
  // Check if params contains custom images and validate them
1494
  if (params.clothesImage) {
 
1495
  // Validate it's a proper data URL
1496
  if (!params.clothesImage.startsWith('data:') && !params.clothesImage.startsWith('http') && !params.clothesImage.startsWith('/')) {
1497
  throw new Error("Invalid clothes image format. Please upload a valid image.");
 
1499
  }
1500
 
1501
  if (params.customBackgroundImage) {
 
1502
  // Validate it's a proper data URL
1503
  if (!params.customBackgroundImage.startsWith('data:') && !params.customBackgroundImage.startsWith('http') && !params.customBackgroundImage.startsWith('/')) {
1504
  throw new Error("Invalid background image format. Please upload a valid image.");
 
1506
  }
1507
 
1508
  // Log request details for debugging
1509
+
1510
+ // ORIGINAL PROCESSING LOGIC RESTORED (HF processing commented out)
1511
+ /*
1512
+ // Only use HF + fal.ai processing
1513
+ if (!isHfProLoggedIn) {
1514
+ setNodes(prev => prev.map(n =>
1515
+ n.id === nodeId ? { ...n, error: "Please login with HF Pro to use fal.ai processing", isRunning: false } : n
1516
+ ));
1517
+ return;
1518
+ }
1519
+
1520
+ // Make a SINGLE API call with fal.ai processing
1521
+ const res = await fetch("/api/hf-process", {
1522
+ method: "POST",
1523
+ headers: { "Content-Type": "application/json" },
1524
+ body: JSON.stringify({
1525
+ type: "COMBINED",
1526
+ image: inputImage,
1527
+ params
1528
+ }),
1529
  });
1530
+ */
1531
 
1532
  // Make a SINGLE API call with all accumulated parameters
1533
  const res = await fetch("/api/process", {
 
1550
  }
1551
 
1552
  const data = await res.json();
1553
+ if (!res.ok) {
1554
+ // Handle both string and object error formats
1555
+ const errorMessage = typeof data.error === 'string'
1556
+ ? data.error
1557
+ : data.error?.message || JSON.stringify(data.error) || "Processing failed";
1558
+ throw new Error(errorMessage);
1559
+ }
1560
 
1561
  // Only update the current node with the output
1562
  // Don't show output in intermediate nodes - they were just used for configuration
 
1571
  }
1572
  return n;
1573
  }));
1574
+
1575
+ // Add to node's history
1576
+ const description = unprocessedNodeCount > 1
1577
+ ? `Combined ${unprocessedNodeCount} transformations`
1578
+ : `${node.type} transformation`;
1579
+
1580
 
1581
  if (unprocessedNodeCount > 1) {
 
 
1582
  }
1583
  } catch (e: any) {
1584
  console.error("Process error:", e);
 
1698
  }
1699
 
1700
  // Log merge details for debugging
1701
 
1702
  const prompt = generateMergePrompt(inputData);
1703
 
1704
+ // ORIGINAL MERGE LOGIC RESTORED (HF processing commented out)
1705
+ /*
1706
+ const res = await fetch("/api/hf-process", {
1707
+ method: "POST",
1708
+ headers: { "Content-Type": "application/json" },
1709
+ body: JSON.stringify({
1710
+ type: "MERGE",
1711
+ images: mergeImages,
1712
+ prompt
1713
+ }),
1714
+ });
1715
+ */
1716
+
1717
  // Use the process route instead of merge route
1718
  const res = await fetch("/api/process", {
1719
  method: "POST",
 
1781
  if (inputData.length < 2) throw new Error("Connect at least two nodes with images (CHARACTER nodes or processed nodes).");
1782
 
1783
  // Debug: Log what we're sending
1784
 
1785
  // Generate dynamic prompt based on number of inputs
1786
  const prompt = generateMergePrompt(inputData);
1787
  const imgs = inputData.map(d => d.image);
1788
 
1789
+ // ORIGINAL RUNMERGE LOGIC RESTORED (HF processing commented out)
1790
+ /*
1791
+ if (!isHfProLoggedIn) {
1792
+ throw new Error("Please login with HF Pro to use fal.ai processing");
1793
+ }
1794
+
1795
+ const res = await fetch("/api/hf-process", {
1796
+ method: "POST",
1797
+ headers: { "Content-Type": "application/json" },
1798
+ body: JSON.stringify({
1799
+ type: "MERGE",
1800
+ images: imgs,
1801
+ prompt
1802
+ }),
1803
+ });
1804
+ */
1805
+
1806
  // Use the process route with MERGE type
1807
  const res = await fetch("/api/process", {
1808
  method: "POST",
 
1834
  }
1835
  const out = js.image || (js.images?.[0] as string) || null;
1836
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, output: out, isRunning: false } : n)));
1837
+
1838
+ // Add merge result to node's history
1839
+ if (out) {
1840
+ const inputLabels = merge.inputs.map((id, index) => {
1841
+ const inputNode = nodes.find(n => n.id === id);
1842
+ if (inputNode?.type === "CHARACTER") {
1843
+ return (inputNode as CharacterNode).label || `Character ${index + 1}`;
1844
+ }
1845
+ return `${inputNode?.type || 'Node'} ${index + 1}`;
1846
+ });
1847
+
1848
+ }
1849
  } catch (e: any) {
1850
  console.error("Merge error:", e);
1851
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, isRunning: false, error: e?.message || "Error" } : n)));
 
1910
  if (inputNode) {
1911
  const start = getNodeOutputPort(inputNode);
1912
  const end = getNodeInputPort(node);
1913
+ const isProcessing = merge.isRunning; // Only animate to the currently processing merge node
1914
  paths.push({
1915
  path: createPath(start.x, start.y, end.x, end.y),
1916
  processing: isProcessing
 
1924
  if (inputNode) {
1925
  const start = getNodeOutputPort(inputNode);
1926
  const end = getNodeInputPort(node);
1927
+ const isProcessing = (node as any).isRunning; // Only animate to the currently processing node
1928
  paths.push({
1929
  path: createPath(start.x, start.y, end.x, end.y),
1930
  processing: isProcessing
 
1997
  const rect = containerRef.current!.getBoundingClientRect();
1998
  const world = screenToWorld(e.clientX, e.clientY, rect, tx, ty, scale);
1999
  setMenuWorld(world);
2000
+
2001
+ // Menu dimensions
2002
+ const menuWidth = 224; // w-56 = 224px
2003
+ const menuHeight = 320; // Approximate height with max-h-[300px] + padding
2004
+
2005
+ // Calculate position relative to container
2006
+ let x = e.clientX - rect.left;
2007
+ let y = e.clientY - rect.top;
2008
+
2009
+ // Adjust if menu would go off right edge
2010
+ if (x + menuWidth > rect.width) {
2011
+ x = rect.width - menuWidth - 10;
2012
+ }
2013
+
2014
+ // Adjust if menu would go off bottom edge
2015
+ if (y + menuHeight > rect.height) {
2016
+ y = rect.height - menuHeight - 10;
2017
+ }
2018
+
2019
+ // Ensure minimum margins from edges
2020
+ x = Math.max(10, x);
2021
+ y = Math.max(10, y);
2022
+
2023
+ setMenuPos({ x, y });
2024
  setMenuOpen(true);
2025
  };
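A worked example of the clamping, with invented numbers:

// container 1000×600, right-click at (950, 580) relative to the container:
//   x: 950 + 224 > 1000  →  x = 1000 - 224 - 10 = 766
//   y: 580 + 320 > 600   →  y = 600 - 320 - 10 = 270
// so the menu opens fully inside the canvas instead of spilling off-screen.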
2026
 
 
2059
  case "FACE":
2060
  setNodes(prev => [...prev, { ...commonProps, type: "FACE", faceOptions: {} } as FaceNode]);
2061
  break;
2062
+ case "EDIT":
2063
+ setNodes(prev => [...prev, { ...commonProps, type: "EDIT" } as EditNode]);
2064
+ break;
2065
+ case "LIGHTNING":
2066
+ setNodes(prev => [...prev, { ...commonProps, type: "LIGHTNING", lightingStrength: 75 } as LightningNode]);
2067
+ break;
2068
+ case "POSES":
2069
+ setNodes(prev => [...prev, { ...commonProps, type: "POSES", poseStrength: 60 } as PosesNode]);
2070
+ break;
2071
  }
2072
  setMenuOpen(false);
2073
  };
 
2078
  <h1 className="text-lg font-semibold tracking-wide">
2079
  <span className="mr-2" aria-hidden>🍌</span>Nano Banana Editor
2080
  </h1>
2081
+ <div className="flex items-center gap-3">
2082
+ {/* ORIGINAL API TOKEN INPUT RESTORED */}
2083
  <label htmlFor="api-token" className="text-sm font-medium text-muted-foreground">
2084
  API Token:
2085
  </label>
 
2091
  onChange={(e) => setApiToken(e.target.value)}
2092
  className="w-64"
2093
  />
2094
+
2095
  <Button
2096
+ variant="outline"
2097
  size="sm"
2098
+ className="h-8 px-3"
2099
  type="button"
2100
  onClick={() => setShowHelpSidebar(true)}
2101
  >
2102
+ Help
2103
  </Button>
2104
+
2105
+
2106
  </div>
2107
  </header>
2108
 
 
2130
  </div>
2131
 
2132
  <div className="space-y-6">
2133
+ {/* ORIGINAL HELP CONTENT RESTORED (HF help commented out) */}
2134
+ {/*
2135
+ <div>
2136
+ <h3 className="font-semibold mb-3 text-foreground">🤗 HF Pro Login</h3>
2137
+ <div className="text-sm text-muted-foreground space-y-3">
2138
+ <div className="p-3 bg-primary/10 border border-primary/20 rounded-lg">
2139
+ <p className="font-medium text-primary mb-2">Step 1: Login with Hugging Face</p>
2140
+ <p>Click "Login HF PRO" to authenticate with your Hugging Face account.</p>
2141
+ </div>
2142
+ <div className="p-3 bg-secondary border border-border rounded-lg">
2143
+ <p className="font-medium text-secondary-foreground mb-2">Step 2: Access fal.ai Models</p>
2144
+ <p>Once logged in, you'll have access to fal.ai's Gemini 2.5 Flash Image models.</p>
2145
+ </div>
2146
+ <div className="p-3 bg-accent border border-border rounded-lg">
2147
+ <p className="font-medium text-accent-foreground mb-2">Step 3: Start Creating</p>
2148
+ <p>Use the powerful fal.ai models for image generation, merging, editing, and style transfers.</p>
2149
+ </div>
2150
+ </div>
2151
+ </div>
2152
+ */}
2153
+
2154
  <div>
2155
  <h3 className="font-semibold mb-3 text-foreground">🔑 API Token Setup</h3>
2156
  <div className="text-sm text-muted-foreground space-y-3">
 
2184
  <div className="p-4 bg-muted border border-border rounded-lg">
2185
  <h4 className="font-semibold text-foreground mb-2">🔒 Privacy & Security</h4>
2186
  <div className="text-sm text-muted-foreground space-y-1">
2187
+ {/* ORIGINAL PRIVACY INFO RESTORED (HF privacy info commented out) */}
2188
+ {/*
2189
+ <p>• Your HF token is stored securely in HTTP-only cookies</p>
2190
+ <p>• Authentication happens through Hugging Face OAuth</p>
2191
+ <p>• You can logout anytime to revoke access</p>
2192
+ <p>• Processing happens via fal.ai's secure infrastructure</p>
2193
+ */}
2194
  <p>• Your API token is stored locally in your browser</p>
2195
  <p>• Tokens are never sent to our servers</p>
2196
  <p>• Keep your API key secure and don't share it</p>
 
2392
  onUpdatePosition={updateNodePosition}
2393
  />
2394
  );
2395
+ case "LIGHTNING":
2396
+ return (
2397
+ <LightningNodeView
2398
+ key={node.id}
2399
+ node={node as LightningNode}
2400
+ onDelete={deleteNode}
2401
+ onUpdate={updateNode}
2402
+ onStartConnection={handleStartConnection}
2403
+ onEndConnection={handleEndSingleConnection}
2404
+ onProcess={processNode}
2405
+ onUpdatePosition={updateNodePosition}
2406
+ />
2407
+ );
2408
+ case "POSES":
2409
+ return (
2410
+ <PosesNodeView
2411
+ key={node.id}
2412
+ node={node as PosesNode}
2413
+ onDelete={deleteNode}
2414
+ onUpdate={updateNode}
2415
+ onStartConnection={handleStartConnection}
2416
+ onEndConnection={handleEndSingleConnection}
2417
+ onProcess={processNode}
2418
+ onUpdatePosition={updateNodePosition}
2419
+ />
2420
+ );
2421
  default:
2422
  return null;
2423
  }
 
2432
  onMouseLeave={() => setMenuOpen(false)}
2433
  >
2434
  <div className="px-3 py-2 text-xs text-white/60">Add node</div>
2435
+ <div
2436
+ className="max-h-[300px] overflow-y-auto scrollbar-thin pr-1"
2437
+ onWheel={(e) => e.stopPropagation()}
2438
+ >
2439
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CHARACTER")}>CHARACTER</button>
2440
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("MERGE")}>MERGE</button>
2441
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("BACKGROUND")}>BACKGROUND</button>
 
2445
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CAMERA")}>CAMERA</button>
2446
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("AGE")}>AGE</button>
2447
  <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("FACE")}>FACE</button>
2448
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("LIGHTNING")}>LIGHTNING</button>
2449
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("POSES")}>POSES</button>
2450
  </div>
2451
  </div>
2452
  )}
lib/utils.ts CHANGED
@@ -1,6 +1,34 @@
1
- import { clsx, type ClassValue } from "clsx"
2
- import { twMerge } from "tailwind-merge"
1
+ /**
2
+ * UTILITY FUNCTIONS
3
+ *
4
+ * Common utility functions used throughout the application.
5
+ * Currently contains the `cn` function for combining CSS class names intelligently.
6
+ */
7
 
8
+ import { clsx, type ClassValue } from "clsx" // Utility for conditional class names
9
+ import { twMerge } from "tailwind-merge" // Utility for merging Tailwind classes
10
+
11
+ /**
12
+ * Combine and merge CSS class names intelligently
13
+ *
14
+ * This function combines the power of two popular utilities:
15
+ * - `clsx`: Handles conditional class names and various input types
16
+ * - `twMerge`: Intelligently merges Tailwind CSS classes, resolving conflicts
17
+ *
18
+ * Key benefits:
19
+ * - Handles conditional classes: cn("base", condition && "conditional")
20
+ * - Resolves Tailwind conflicts: cn("p-4", "p-2") → "p-2" (last one wins)
21
+ * - Removes duplicates and undefined values
22
+ * - Supports arrays, objects, and mixed types
23
+ *
24
+ * @param inputs Variable number of class values (strings, objects, arrays, etc.)
25
+ * @returns Single string with merged and optimized class names
26
+ *
27
+ * @example
28
+ * cn("btn", "btn-primary", isActive && "active")
29
+ * cn("p-4 m-2", { "bg-red-500": hasError, "bg-green-500": isSuccess })
30
+ * cn(["base-class", "modifier"], conditionalClass)
31
+ */
32
  export function cn(...inputs: ClassValue[]) {
33
  return twMerge(clsx(inputs))
34
  }
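A quick demonstration of the merge behavior documented above (results follow clsx/tailwind-merge semantics):

cn("p-4", "p-2");                           // → "p-2" (conflicting padding, last wins)
cn("btn", { active: true, ghost: false });  // → "btn active"
cn(["rounded", undefined], null, "shadow"); // → "rounded shadow"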
next.config.ts CHANGED
@@ -8,11 +8,6 @@ const nextConfig: NextConfig = {
8
  serverRuntimeConfig: {
9
  bodySizeLimit: '50mb',
10
  },
11
- api: {
12
- bodyParser: {
13
- sizeLimit: '50mb',
14
- },
15
- },
16
  };
17
 
18
  export default nextConfig;
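For context on the removal (hedged; verify against your Next.js version): `api.bodyParser` is a Pages Router per-API-route option, not a recognized next.config key, so Next.js warns about it and ignores it there. A comparable App Router knob, if one is needed for Server Actions, would be:

// next.config.ts (sketch, assumption):
const nextConfig = {
  experimental: {
    serverActions: { bodySizeLimit: "50mb" },
  },
};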
 
package-lock.json CHANGED
8
  "name": "banana",
9
  "version": "0.1.0",
10
  "dependencies": {
11
+ "@fal-ai/serverless-client": "^0.15.0",
12
  "@google/genai": "^1.17.0",
13
+ "@huggingface/hub": "^2.6.3",
14
+ "@huggingface/inference": "^4.8.0",
15
  "class-variance-authority": "^0.7.0",
16
  "clsx": "^2.1.1",
17
  "lucide-react": "^0.542.0",
 
219
  "node": "^18.18.0 || ^20.9.0 || >=21.1.0"
220
  }
221
  },
222
+ "node_modules/@fal-ai/serverless-client": {
223
+ "version": "0.15.0",
224
+ "resolved": "https://registry.npmjs.org/@fal-ai/serverless-client/-/serverless-client-0.15.0.tgz",
225
+ "integrity": "sha512-4Vuocu0342OijAN6xO/lwohDV7h90LbkTnOAEwH+pYvMFVC6RYmHS4GILc/wnOWBTw+iFlZFEKlljEVolkjVfg==",
226
+ "license": "MIT",
227
+ "dependencies": {
228
+ "@msgpack/msgpack": "^3.0.0-beta2",
229
+ "eventsource-parser": "^1.1.2",
230
+ "robot3": "^0.4.1"
231
+ },
232
+ "engines": {
233
+ "node": ">=18.0.0"
234
+ }
235
+ },
236
  "node_modules/@google/genai": {
237
  "version": "1.17.0",
238
  "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.17.0.tgz",
 
254
  }
255
  }
256
  },
257
+ "node_modules/@huggingface/hub": {
258
+ "version": "2.6.3",
259
+ "resolved": "https://registry.npmjs.org/@huggingface/hub/-/hub-2.6.3.tgz",
260
+ "integrity": "sha512-IEZ67adV+gWqg98A//mU0Ed+Q6xGPQxMfK+aV36b0Ww7R4EXG1O0zyiCcbLE/cvryfCD8+PNEwQgiPU+v63tsQ==",
261
+ "license": "MIT",
262
+ "dependencies": {
263
+ "@huggingface/tasks": "^0.19.45"
264
+ },
265
+ "bin": {
266
+ "hfjs": "dist/cli.js"
267
+ },
268
+ "engines": {
269
+ "node": ">=18"
270
+ },
271
+ "optionalDependencies": {
272
+ "cli-progress": "^3.12.0"
273
+ }
274
+ },
275
+ "node_modules/@huggingface/inference": {
276
+ "version": "4.8.0",
277
+ "resolved": "https://registry.npmjs.org/@huggingface/inference/-/inference-4.8.0.tgz",
278
+ "integrity": "sha512-Eq98EAXqYn4rKMfrbEXuhc3IjKfaeIO6eXNOZk9xk6v5akrIWRtd6d1h0fjAWyX4zRbdUpXRh6MvsqXnzGvXCA==",
279
+ "license": "MIT",
280
+ "dependencies": {
281
+ "@huggingface/jinja": "^0.5.1",
282
+ "@huggingface/tasks": "^0.19.45"
283
+ },
284
+ "engines": {
285
+ "node": ">=18"
286
+ }
287
+ },
288
+ "node_modules/@huggingface/jinja": {
289
+ "version": "0.5.1",
290
+ "resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.5.1.tgz",
291
+ "integrity": "sha512-yUZLld4lrM9iFxHCwFQ7D1HW2MWMwSbeB7WzWqFYDWK+rEb+WldkLdAJxUPOmgICMHZLzZGVcVjFh3w/YGubng==",
292
+ "license": "MIT",
293
+ "engines": {
294
+ "node": ">=18"
295
+ }
296
+ },
297
+ "node_modules/@huggingface/tasks": {
298
+ "version": "0.19.45",
299
+ "resolved": "https://registry.npmjs.org/@huggingface/tasks/-/tasks-0.19.45.tgz",
300
+ "integrity": "sha512-lM3QOgbfkGZ5gAZOYWOmzMM6BbKcXOIHjgnUAoymTdZEcEcGSr0vy/LWGEiK+vBXC4vU+sCT+WNoA/JZ8TEWdA==",
301
+ "license": "MIT"
302
+ },
303
  "node_modules/@humanfs/core": {
304
  "version": "0.19.1",
305
  "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz",
 
833
  "@jridgewell/sourcemap-codec": "^1.4.14"
834
  }
835
  },
836
+ "node_modules/@msgpack/msgpack": {
837
+ "version": "3.1.2",
838
+ "resolved": "https://registry.npmjs.org/@msgpack/msgpack/-/msgpack-3.1.2.tgz",
839
+ "integrity": "sha512-JEW4DEtBzfe8HvUYecLU9e6+XJnKDlUAIve8FvPzF3Kzs6Xo/KuZkZJsDH0wJXl/qEZbeeE7edxDNY3kMs39hQ==",
840
+ "license": "ISC",
841
+ "engines": {
842
+ "node": ">= 18"
843
+ }
844
+ },
845
  "node_modules/@napi-rs/wasm-runtime": {
846
  "version": "0.2.12",
847
  "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.12.tgz",
 
2014
  "url": "https://github.com/sponsors/epoberezkin"
2015
  }
2016
  },
2017
+ "node_modules/ansi-regex": {
2018
+ "version": "5.0.1",
2019
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
2020
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
2021
+ "license": "MIT",
2022
+ "optional": true,
2023
+ "engines": {
2024
+ "node": ">=8"
2025
+ }
2026
+ },
2027
  "node_modules/ansi-styles": {
2028
  "version": "4.3.0",
2029
  "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
 
2455
  "url": "https://polar.sh/cva"
2456
  }
2457
  },
2458
+ "node_modules/cli-progress": {
2459
+ "version": "3.12.0",
2460
+ "resolved": "https://registry.npmjs.org/cli-progress/-/cli-progress-3.12.0.tgz",
2461
+ "integrity": "sha512-tRkV3HJ1ASwm19THiiLIXLO7Im7wlTuKnvkYaTkyoAPefqjNg7W7DHKUlGRxy9vxDvbyCYQkQozvptuMkGCg8A==",
2462
+ "license": "MIT",
2463
+ "optional": true,
2464
+ "dependencies": {
2465
+ "string-width": "^4.2.3"
2466
+ },
2467
+ "engines": {
2468
+ "node": ">=4"
2469
+ }
2470
+ },
2471
  "node_modules/client-only": {
2472
  "version": "0.0.1",
2473
  "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz",
 
3362
  "node": ">=0.10.0"
3363
  }
3364
  },
3365
+ "node_modules/eventsource-parser": {
3366
+ "version": "1.1.2",
3367
+ "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-1.1.2.tgz",
3368
+ "integrity": "sha512-v0eOBUbiaFojBu2s2NPBfYUoRR9GjcDNvCXVaqEf5vVfpIAh9f8RCo4vXTP8c63QRKCFwoLpMpTdPwwhEKVgzA==",
3369
+ "license": "MIT",
3370
+ "engines": {
3371
+ "node": ">=14.18"
3372
+ }
3373
+ },
3374
  "node_modules/extend": {
3375
  "version": "3.0.2",
3376
  "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
 
4105
  "url": "https://github.com/sponsors/ljharb"
4106
  }
4107
  },
4108
+ "node_modules/is-fullwidth-code-point": {
4109
+ "version": "3.0.0",
4110
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
4111
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
4112
+ "license": "MIT",
4113
+ "optional": true,
4114
+ "engines": {
4115
+ "node": ">=8"
4116
+ }
4117
+ },
4118
  "node_modules/is-generator-function": {
4119
  "version": "1.1.0",
4120
  "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz",
 
5546
  "node": ">=0.10.0"
5547
  }
5548
  },
5549
+ "node_modules/robot3": {
5550
+ "version": "0.4.1",
5551
+ "resolved": "https://registry.npmjs.org/robot3/-/robot3-0.4.1.tgz",
5552
+ "integrity": "sha512-hzjy826lrxzx8eRgv80idkf8ua1JAepRc9Efdtj03N3KNJuznQCPlyCJ7gnUmDFwZCLQjxy567mQVKmdv2BsXQ==",
5553
+ "license": "BSD-2-Clause"
5554
+ },
5555
  "node_modules/run-parallel": {
5556
  "version": "1.2.0",
5557
  "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
 
5901
  "node": ">= 0.4"
5902
  }
5903
  },
5904
+ "node_modules/string-width": {
5905
+ "version": "4.2.3",
5906
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
5907
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
5908
+ "license": "MIT",
5909
+ "optional": true,
5910
+ "dependencies": {
5911
+ "emoji-regex": "^8.0.0",
5912
+ "is-fullwidth-code-point": "^3.0.0",
5913
+ "strip-ansi": "^6.0.1"
5914
+ },
5915
+ "engines": {
5916
+ "node": ">=8"
5917
+ }
5918
+ },
5919
+ "node_modules/string-width/node_modules/emoji-regex": {
5920
+ "version": "8.0.0",
5921
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
5922
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
5923
+ "license": "MIT",
5924
+ "optional": true
5925
+ },
5926
  "node_modules/string.prototype.includes": {
5927
  "version": "2.0.1",
5928
  "resolved": "https://registry.npmjs.org/string.prototype.includes/-/string.prototype.includes-2.0.1.tgz",
 
6036
  "url": "https://github.com/sponsors/ljharb"
6037
  }
6038
  },
6039
+ "node_modules/strip-ansi": {
6040
+ "version": "6.0.1",
6041
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
6042
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
6043
+ "license": "MIT",
6044
+ "optional": true,
6045
+ "dependencies": {
6046
+ "ansi-regex": "^5.0.1"
6047
+ },
6048
+ "engines": {
6049
+ "node": ">=8"
6050
+ }
6051
+ },
6052
  "node_modules/strip-bom": {
6053
  "version": "3.0.0",
6054
  "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
package.json CHANGED
9
  "lint": "eslint"
10
  },
11
  "dependencies": {
12
+ "@fal-ai/serverless-client": "^0.15.0",
13
  "@google/genai": "^1.17.0",
14
+ "@huggingface/hub": "^2.6.3",
15
+ "@huggingface/inference": "^4.8.0",
16
  "class-variance-authority": "^0.7.0",
17
  "clsx": "^2.1.1",
18
  "lucide-react": "^0.542.0",
public/clothes/blazzer.png ADDED
public/clothes/suit.png ADDED

Git LFS Details

  • SHA256: f0cea85ab9a6790fa806ce3d4af596b098d034a3c6e02cbf4e1084e2ff40e716
  • Pointer size: 131 Bytes
  • Size of remote file: 252 kB
public/clothes/sukajan.png ADDED

Git LFS Details

  • SHA256: 55ddf339f96ccdc5c8304b28ec9734749b9f8d6b51537fccff9562ee7abdb8f4
  • Pointer size: 131 Bytes
  • Size of remote file: 106 kB
public/clothes/womenoutfit.png ADDED

Git LFS Details

  • SHA256: d15bd48b7b6c183e6ced4a14f4dfbec2ee08dcb720d744be9123c582b232f3f5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.44 MB
public/lighting/light1.png ADDED

Git LFS Details

  • SHA256: 93fc82cb9d60413d2cb20f18ed26e2205acc4e5bd522166214ce490e1c7dd4da
  • Pointer size: 132 Bytes
  • Size of remote file: 1.14 MB
public/lighting/light2.png ADDED

Git LFS Details

  • SHA256: f74108c8fb6e6ad31412fc76220c91f5dc2df78400c04b08785a0e72584f8952
  • Pointer size: 131 Bytes
  • Size of remote file: 521 kB
public/lighting/light3.png ADDED

Git LFS Details

  • SHA256: 0016e53efb8d7c09c71510b79c8d60274d090bf75d75a2044969a42d269e096d
  • Pointer size: 131 Bytes
  • Size of remote file: 363 kB
public/makeup/makeup1.png ADDED

Git LFS Details

  • SHA256: dae001f597d923286a6a50c8f34e33001891e7955c0edab2d7ea9b2972336df7
  • Pointer size: 131 Bytes
  • Size of remote file: 441 kB
public/poses/sit1.png ADDED

Git LFS Details

  • SHA256: 56f037e9af7299f0a1f816f3d3d38f1789589f623e73ef5223f7f07d2711df81
  • Pointer size: 131 Bytes
  • Size of remote file: 288 kB
public/poses/sit2.png ADDED

Git LFS Details

  • SHA256: 0485c4e595132ba1cbd14e745379f89c6d6f2aa0d784bc9eeeaf2eb73cbf53d9
  • Pointer size: 131 Bytes
  • Size of remote file: 170 kB
public/poses/stand1.png ADDED

Git LFS Details

  • SHA256: 23b020da3b48d56f3dff304a20cc9586c9976080bc5c845b096547c847085a56
  • Pointer size: 131 Bytes
  • Size of remote file: 280 kB
public/poses/stand2.png ADDED

Git LFS Details

  • SHA256: 090dcd2e031401378d3d4ce16ed812de4ab3ba53a96641793f9c57eea5703bb9
  • Pointer size: 131 Bytes
  • Size of remote file: 321 kB
public/reo.png ADDED

Git LFS Details

  • SHA256: e0e9f5a3dfb575a6353b500ee519581cff46fc86005b4a497320fb8a3175134e
  • Pointer size: 132 Bytes
  • Size of remote file: 2.61 MB