Erweiterte Funktionen

Erweiterte Funktionen und Fähigkeiten von Qwen Image Edit

Erweiterte Funktionen

Erweiterte Funktionen und Techniken für professionelle Anwendungen von Qwen Image Edit.

🔄 Asynchrone Verarbeitung

Webhooks für Langzeitoperationen

Für komplexe Bildbearbeitungen, die länger dauern können, verwende Webhooks:

// Set up the webhook endpoint that receives job-status callbacks
const express = require('express');
const app = express();

app.use(express.json());

// Webhook handler: the service POSTs { jobId, status, result, error }
app.post('/webhook/qwen', (req, res) => {
  const { jobId, status, result, error } = req.body;
  
  console.log(`Job ${jobId} Status: ${status}`);
  
  if (status === 'completed') {
    console.log('✅ Verarbeitung abgeschlossen:', result.imageUrl);
    // Further processing...
  } else if (status === 'failed') {
    console.error('❌ Verarbeitung fehlgeschlagen:', error);
  }
  
  // Always acknowledge receipt so the sender does not retry
  res.status(200).send('OK');
});

app.listen(3000, () => {
  console.log('Webhook-Server läuft auf Port 3000');
});

// Start the asynchronous operation; the final result arrives via the webhook above
const job = await editor.editTextAsync({
  image: './komplexes-bild.jpg',
  prompt: 'Komplexe Stilübertragung mit hoher Qualität',
  webhookUrl: 'https://meine-domain.com/webhook/qwen',
  quality: 'ultra'
});

console.log('Job gestartet:', job.jobId);

Job-Tracking und -Management

/**
 * Tracks asynchronous editor jobs: in-flight jobs, their progress, and an
 * archive of finished jobs (completed OR failed) so status stays queryable.
 */
class JobManager {
  /**
   * @param {object} editor - Editor client exposing `<operation>Async` methods.
   */
  constructor(editor) {
    this.editor = editor;
    this.activeJobs = new Map();    // jobId -> in-flight job info
    this.completedJobs = new Map(); // jobId -> finished job info (completed or failed)
  }
  
  /**
   * Starts an asynchronous editor operation and tracks it.
   *
   * NOTE(review): despite returning `status: 'started'`, this method awaits
   * the full `<operation>Async` call before returning — confirm whether the
   * editor API resolves immediately with a handle or only after processing.
   *
   * @param {string} operation - Base name of the editor method (e.g. 'editText').
   * @param {object} options - Options forwarded to the editor method.
   * @returns {Promise<{jobId: string, status: string}>}
   * @throws Re-throws any error from the editor after recording the failure.
   */
  async startJob(operation, options) {
    const jobId = this.generateJobId();
    
    const jobInfo = {
      id: jobId,
      operation,
      options,
      status: 'pending',
      startTime: Date.now(),
      progress: 0
    };
    
    this.activeJobs.set(jobId, jobInfo);
    
    try {
      // Kick off the asynchronous operation; progress arrives via callback.
      const result = await this.editor[`${operation}Async`]({
        ...options,
        jobId,
        progressCallback: (progress) => {
          this.updateJobProgress(jobId, progress);
        }
      });
      
      this.completeJob(jobId, result);
      return { jobId, status: 'started' };
    } catch (error) {
      this.failJob(jobId, error);
      throw error;
    }
  }
  
  // Records a progress update (percentage) for a still-active job.
  updateJobProgress(jobId, progress) {
    const job = this.activeJobs.get(jobId);
    if (job) {
      job.progress = progress;
      job.lastUpdate = Date.now();
      
      console.log(`📊 Job ${jobId}: ${progress}% abgeschlossen`);
    }
  }
  
  // Moves a job from the active map into the archive and records timing.
  completeJob(jobId, result) {
    const job = this.activeJobs.get(jobId);
    if (job) {
      job.status = 'completed';
      job.result = result;
      job.endTime = Date.now();
      job.duration = job.endTime - job.startTime;
      
      this.completedJobs.set(jobId, job);
      this.activeJobs.delete(jobId);
      
      console.log(`✅ Job ${jobId} abgeschlossen in ${job.duration}ms`);
    }
  }
  
  /**
   * Marks a job as failed.
   * Bug fix: the failed job is now archived in `completedJobs` instead of
   * being dropped, so `getJobStatus` can still report the failure afterwards.
   */
  failJob(jobId, error) {
    const job = this.activeJobs.get(jobId);
    if (job) {
      job.status = 'failed';
      job.error = error;
      job.endTime = Date.now();
      job.duration = job.endTime - job.startTime;
      
      this.completedJobs.set(jobId, job); // keep failed jobs queryable
      this.activeJobs.delete(jobId);
      
      console.error(`❌ Job ${jobId} fehlgeschlagen:`, error.message);
    }
  }
  
  // Looks up a job by id, whether still active or already finished.
  getJobStatus(jobId) {
    return this.activeJobs.get(jobId) || this.completedJobs.get(jobId);
  }
  
  getAllActiveJobs() {
    return Array.from(this.activeJobs.values());
  }
  
  // Unique-enough id: timestamp plus random base-36 suffix.
  generateJobId() {
    return `job_${Date.now()}_${Math.random().toString(36).substring(2, 8)}`;
  }
}

// Usage
const jobManager = new JobManager(editor);

// Start a job
const job = await jobManager.startJob('editText', {
  image: './bild.jpg',
  prompt: 'Komplexe Bearbeitung'
});

// Poll the status once per second; stop polling once the job reaches a
// terminal state (bug fix: the original interval was never cleared and
// would keep running forever).
const poll = setInterval(() => {
  const status = jobManager.getJobStatus(job.jobId);
  console.log(`Job Status: ${status?.status} (${status?.progress}%)`);
  if (status?.status === 'completed' || status?.status === 'failed') {
    clearInterval(poll);
  }
}, 1000);

🚀 Optimierte Batch-Verarbeitung

Intelligente Parallelisierung

import pLimit from 'p-limit';
import pRetry from 'p-retry';

/**
 * Runs editor operations in parallel with bounded concurrency (p-limit) and
 * exponential-backoff retries (p-retry).
 */
class BatchProcessor {
  /**
   * @param {object} editor - Editor client.
   * @param {object} [options] - `concurrency` (default 3), `retries` (default 2).
   */
  constructor(editor, options = {}) {
    this.editor = editor;
    this.concurrency = options.concurrency || 3;
    this.retries = options.retries || 2;
    this.limit = pLimit(this.concurrency);
  }
  
  /**
   * Executes every operation; never rejects as a whole — each outcome is
   * collected via Promise.allSettled and summarized.
   */
  async processBatch(operations) {
    console.log(`🚀 Starte Batch-Verarbeitung: ${operations.length} Operationen`);
    
    const settled = await Promise.allSettled(
      operations.map((op, idx) =>
        this.limit(() => this.processWithRetry(op, idx))
      )
    );
    
    const successful = settled.filter((s) => s.status === 'fulfilled').length;
    const failed = settled.filter((s) => s.status === 'rejected').length;
    
    console.log(`✅ Batch abgeschlossen: ${successful} erfolgreich, ${failed} fehlgeschlagen`);
    
    return {
      successful,
      failed,
      results: settled.map((s, idx) => ({
        index: idx,
        status: s.status,
        value: s.status === 'fulfilled' ? s.value : null,
        error: s.status === 'rejected' ? s.reason : null
      }))
    };
  }
  
  /**
   * Dispatches one operation to the matching editor method and retries on
   * failure with exponential backoff.
   */
  async processWithRetry(operation, index) {
    const label = index + 1;
    
    const run = async () => {
      console.log(`📝 Verarbeite Operation ${label}: ${operation.type}`);
      
      const begin = Date.now();
      
      // Dispatch table instead of a switch — same four supported types.
      const handlers = {
        editText: (p) => this.editor.editText(p),
        editElement: (p) => this.editor.editElement(p),
        transferStyle: (p) => this.editor.transferStyle(p),
        analyzeImage: (p) => this.editor.analyzeImage(p)
      };
      
      const handler = handlers[operation.type];
      if (!handler) {
        throw new Error(`Unbekannte Operation: ${operation.type}`);
      }
      const result = await handler(operation.params);
      
      const duration = Date.now() - begin;
      console.log(`✅ Operation ${label} abgeschlossen in ${duration}ms`);
      
      return {
        ...result,
        operationIndex: index,
        duration
      };
    };
    
    return pRetry(run, {
      retries: this.retries,
      factor: 2,
      minTimeout: 1000,
      maxTimeout: 10000,
      onFailedAttempt: (error) => {
        console.log(`⚠️ Operation ${label} Versuch ${error.attemptNumber} fehlgeschlagen: ${error.message}`);
      }
    });
  }
}

// Usage: a processor with five parallel slots and three retries per operation
const batchProcessor = new BatchProcessor(editor, { concurrency: 5, retries: 3 });

// Work list: each entry names an operation type plus its parameters.
const operations = [
  {
    type: 'editText',
    params: { image: './bild1.jpg', prompt: 'Ändere Text zu "Neu"' }
  },
  {
    type: 'transferStyle',
    params: { image: './bild2.jpg', styleImage: './stil.jpg', intensity: 0.8 }
  },
  {
    type: 'editElement',
    params: { image: './bild3.jpg', prompt: 'Füge Logo hinzu' }
  }
];

const batchResult = await batchProcessor.processBatch(operations);
console.log('Batch-Ergebnis:', batchResult);

Intelligente Lastverteilung

/**
 * Distributes editor operations across several editor instances, picking the
 * instance with the lowest weighted load score per request.
 */
class LoadBalancer {
  /** @param {object[]} editors - Editor instances to balance across. */
  constructor(editors) {
    this.editors = editors;
    this.currentIndex = 0;
    // One stats record per editor, same order as `editors`.
    this.stats = editors.map(() => ({
      requests: 0,
      errors: 0,
      avgResponseTime: 0,
      lastUsed: 0
    }));
  }
  
  /**
   * Index of the editor with the lowest combined score of request count,
   * error count, smoothed response time, and (negatively weighted) idle time.
   */
  getOptimalEditor() {
    const scoreOf = (stat) =>
      stat.requests * 0.3 +
      stat.errors * 0.4 +
      stat.avgResponseTime * 0.2 +
      (Date.now() - stat.lastUsed) * -0.1;
    
    let chosen = 0;
    let lowest = Infinity;
    this.stats.forEach((stat, i) => {
      const score = scoreOf(stat);
      if (score < lowest) {
        lowest = score;
        chosen = i;
      }
    });
    
    return chosen;
  }
  
  /**
   * Runs `editor[operation](params)` on the optimal instance, updating that
   * instance's request/error/latency statistics.
   */
  async executeOperation(operation, params) {
    const idx = this.getOptimalEditor();
    const stat = this.stats[idx];
    
    const begin = Date.now();
    stat.requests += 1;
    stat.lastUsed = begin;
    
    try {
      const result = await this.editors[idx][operation](params);
      
      const elapsed = Date.now() - begin;
      // Simple smoothing: mean of the previous value and the latest sample.
      stat.avgResponseTime = (stat.avgResponseTime + elapsed) / 2;
      
      console.log(`✅ Operation auf Editor ${idx} in ${elapsed}ms`);
      
      return result;
    } catch (error) {
      stat.errors += 1;
      console.error(`❌ Fehler auf Editor ${idx}:`, error.message);
      throw error;
    }
  }
  
  /** Per-editor statistics enriched with a 0–100 health score. */
  getStats() {
    return this.stats.map((stat, editor) => ({
      editor,
      ...stat,
      healthScore: this.calculateHealthScore(stat)
    }));
  }
  
  /** 0–100: mean of an error-rate score and a response-time score. */
  calculateHealthScore(stat) {
    if (stat.requests === 0) return 100;
    
    const errorRate = stat.errors / stat.requests;
    const responseScore = Math.max(0, 100 - stat.avgResponseTime / 100);
    const errorScore = Math.max(0, 100 - errorRate * 100);
    
    return Math.round((responseScore + errorScore) / 2);
  }
}

// Setup with multiple editor instances, one per region
const editors = [
  new QwenImageEdit({ apiKey: process.env.QWEN_API_KEY, region: 'eu-west-1' }),
  new QwenImageEdit({ apiKey: process.env.QWEN_API_KEY, region: 'us-east-1' }),
  new QwenImageEdit({ apiKey: process.env.QWEN_API_KEY, region: 'ap-southeast-1' })
];

const loadBalancer = new LoadBalancer(editors);

// Usage: the balancer picks the least-loaded instance per call
const result = await loadBalancer.executeOperation('editText', {
  image: './bild.jpg',
  prompt: 'Bearbeite das Bild'
});

// Show per-editor statistics
console.log('Load Balancer Stats:', loadBalancer.getStats());

🔍 Tiefgehende Bildanalyse

Umfassende Bildanalyse

/**
 * Runs the editor's analysis passes (text, objects, faces, colors, style,
 * quality) in parallel and condenses them into one report with actionable
 * recommendations.
 */
class AdvancedImageAnalyzer {
  /** @param {object} editor - Client exposing `analyzeImage({ image, analysisTypes })`. */
  constructor(editor) {
    this.editor = editor;
  }
  
  /**
   * Performs all six analyses concurrently and builds the combined report.
   * @param {string} imagePath - Path/URL of the image to analyze.
   * @returns {Promise<object>} Report with `analysis` and `recommendations`.
   */
  async performDeepAnalysis(imagePath) {
    console.log('🔍 Starte tiefgehende Bildanalyse...');
    
    // All passes are independent, so run them concurrently.
    const [text, objects, faces, colors, style, quality] = await Promise.all([
      this.analyzeText(imagePath),
      this.analyzeObjects(imagePath),
      this.analyzeFaces(imagePath),
      this.analyzeColors(imagePath),
      this.analyzeStyle(imagePath),
      this.analyzeQuality(imagePath)
    ]);
    
    const report = {
      timestamp: new Date().toISOString(),
      image: imagePath,
      analysis: {
        text,
        objects,
        faces,
        colors,
        style,
        quality
      },
      recommendations: this.generateRecommendations({
        text, objects, faces, colors, style, quality
      })
    };
    
    console.log('✅ Tiefgehende Analyse abgeschlossen');
    return report;
  }
  
  /** OCR pass: detected text content, language, confidence, and regions. */
  async analyzeText(imagePath) {
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['text']
    });
    
    return {
      hasText: result.analysis.text?.content?.length > 0,
      content: result.analysis.text?.content || '',
      language: result.analysis.text?.language || 'unknown',
      confidence: result.analysis.text?.confidence || 0,
      regions: result.analysis.text?.regions || []
    };
  }
  
  /** Object-detection pass: per-object info plus categories and the top hit. */
  async analyzeObjects(imagePath) {
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['objects']
    });
    
    const objects = result.analysis.objects || [];
    
    return {
      count: objects.length,
      objects: objects.map(obj => ({
        name: obj.name,
        confidence: obj.confidence,
        category: obj.category,
        boundingBox: obj.boundingBox
      })),
      categories: [...new Set(objects.map(obj => obj.category))],
      // Object with the highest confidence, or null when none were found.
      dominantObject: objects.reduce((max, obj) => 
        obj.confidence > (max?.confidence || 0) ? obj : max, null
      )
    };
  }
  
  /** Face pass: per-face attributes plus aggregate age/emotion statistics. */
  async analyzeFaces(imagePath) {
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['faces']
    });
    
    const faces = result.analysis.faces || [];
    
    // Bug fix: only average over faces that report a numeric age — a single
    // face without `age` used to turn the whole average into NaN.
    const ages = faces
      .map(face => face.age)
      .filter(age => typeof age === 'number' && Number.isFinite(age));
    
    return {
      count: faces.length,
      faces: faces.map(face => ({
        confidence: face.confidence,
        age: face.age,
        gender: face.gender,
        emotion: face.emotion,
        boundingBox: face.boundingBox
      })),
      averageAge: ages.length > 0 ? 
        ages.reduce((sum, age) => sum + age, 0) / ages.length : 0,
      dominantEmotion: this.getDominantEmotion(faces)
    };
  }
  
  /** Color pass: dominant colors, palette, and global tone metrics. */
  async analyzeColors(imagePath) {
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['colors']
    });
    
    const colors = result.analysis.colors || {};
    
    return {
      dominantColors: colors.dominant || [],
      palette: colors.palette || [],
      colorScheme: colors.scheme || 'unknown',
      brightness: colors.brightness || 0,
      contrast: colors.contrast || 0,
      saturation: colors.saturation || 0,
      temperature: colors.temperature || 'neutral'
    };
  }
  
  /** Style pass: category flags (artistic/photographic/vintage/modern). */
  async analyzeStyle(imagePath) {
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['style']
    });
    
    const style = result.analysis.style || {};
    
    return {
      category: style.category || 'unknown',
      subcategory: style.subcategory || 'unknown',
      artistic: style.artistic || false,
      photographic: style.photographic || false,
      vintage: style.vintage || false,
      modern: style.modern || false,
      confidence: style.confidence || 0
    };
  }
  
  /** Quality pass: sharpness, noise, exposure, composition, and issues. */
  async analyzeQuality(imagePath) {
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['quality']
    });
    
    const quality = result.analysis.quality || {};
    
    return {
      overall: quality.overall || 0,
      sharpness: quality.sharpness || 0,
      noise: quality.noise || 0,
      exposure: quality.exposure || 0,
      composition: quality.composition || 0,
      resolution: quality.resolution || 'unknown',
      issues: quality.issues || []
    };
  }
  
  /**
   * Most frequent emotion among the faces, or null when no face carries an
   * emotion label. (Bug fix: faces without `emotion` used to be counted
   * under the literal key "undefined".)
   */
  getDominantEmotion(faces) {
    const counts = {};
    faces.forEach(face => {
      if (face.emotion) {
        counts[face.emotion] = (counts[face.emotion] || 0) + 1;
      }
    });
    
    const ranked = Object.entries(counts)
      .sort(([,a], [,b]) => b - a);
    
    return ranked.length > 0 ? ranked[0][0] : null;
  }
  
  /**
   * Derives follow-up actions from the per-pass results; thresholds are
   * heuristic and documented inline.
   */
  generateRecommendations(analysis) {
    const recommendations = [];
    
    // Text-based: low OCR confidence suggests the image needs enhancement.
    if (analysis.text.hasText && analysis.text.confidence < 0.8) {
      recommendations.push({
        type: 'text',
        priority: 'medium',
        message: 'Text-Erkennung hat niedrige Konfidenz. Erwäge Bildverbesserung.',
        action: 'enhanceImage'
      });
    }
    
    // Quality-based recommendations.
    if (analysis.quality.overall < 0.6) {
      recommendations.push({
        type: 'quality',
        priority: 'high',
        message: 'Bildqualität ist niedrig. Verbesserung empfohlen.',
        action: 'enhanceImage'
      });
    }
    
    if (analysis.quality.noise > 0.7) {
      recommendations.push({
        type: 'noise',
        priority: 'medium',
        message: 'Hoher Rauschpegel erkannt. Rauschreduzierung empfohlen.',
        action: 'cleanImage'
      });
    }
    
    // Color-based recommendations.
    if (analysis.colors.contrast < 0.3) {
      recommendations.push({
        type: 'contrast',
        priority: 'medium',
        message: 'Niedriger Kontrast. Kontrast-Verbesserung empfohlen.',
        action: 'enhanceContrast'
      });
    }
    
    // Style-based recommendations.
    if (analysis.style.confidence < 0.5) {
      recommendations.push({
        type: 'style',
        priority: 'low',
        message: 'Stil nicht eindeutig erkannt. Manuelle Kategorisierung empfohlen.',
        action: 'manualReview'
      });
    }
    
    return recommendations;
  }
}

// Usage
const analyzer = new AdvancedImageAnalyzer(editor);
const report = await analyzer.performDeepAnalysis('./komplexes-bild.jpg');

console.log('📊 Analyse-Bericht:', JSON.stringify(report, null, 2));

// Act on the recommendations — only high-priority ones are handled here
for (const rec of report.recommendations) {
  if (rec.priority === 'high') {
    console.log(`🚨 Hohe Priorität: ${rec.message}`);
    
    if (rec.action === 'enhanceImage') {
      const enhanced = await editor.enhanceImage({
        image: './komplexes-bild.jpg',
        enhancements: ['quality', 'sharpness']
      });
      console.log('✅ Bild verbessert:', enhanced.imageUrl);
    }
  }
}

🛡️ Inhaltsmoderation

Automatische Inhaltsfilterung

/**
 * Screens images against per-category thresholds (adult, violence,
 * inappropriate, spam) and gates editor operations on the verdict.
 */
class ContentModerator {
  /** @param {object} editor - Client exposing `analyzeImage` and edit methods. */
  constructor(editor) {
    this.editor = editor;
    // A category violates when its score exceeds `threshold` (and is enabled).
    this.moderationRules = {
      adult: { enabled: true, threshold: 0.7 },
      violence: { enabled: true, threshold: 0.8 },
      inappropriate: { enabled: true, threshold: 0.6 },
      spam: { enabled: true, threshold: 0.5 }
    };
  }
  
  /** Analyzes one image and builds a report with a recommended action. */
  async moderateImage(imagePath) {
    console.log('🛡️ Starte Inhaltsmoderation...');
    
    const result = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['moderation']
    });
    
    const scores = result.analysis.moderation || {};
    const violations = this.checkViolations(scores);
    
    const report = {
      timestamp: new Date().toISOString(),
      image: imagePath,
      safe: violations.length === 0,
      violations,
      scores,
      action: this.determineAction(violations)
    };
    
    console.log(`${report.safe ? '✅' : '❌'} Moderation abgeschlossen: ${violations.length} Verstöße`);
    
    return report;
  }
  
  /** Enabled categories whose score exceeds their threshold, highest first. */
  checkViolations(scores) {
    return Object.entries(this.moderationRules)
      .filter(([category, rule]) => rule.enabled && scores[category] > rule.threshold)
      .map(([category, rule]) => ({
        category,
        score: scores[category],
        threshold: rule.threshold,
        severity: this.getSeverity(scores[category])
      }))
      .sort((a, b) => b.score - a.score);
  }
  
  /** Maps a raw score onto a severity bucket. */
  getSeverity(score) {
    if (score >= 0.9) return 'critical';
    if (score >= 0.8) return 'high';
    if (score >= 0.6) return 'medium';
    return 'low';
  }
  
  /** approve (clean) | block (any critical) | review (any high) | flag (rest). */
  determineAction(violations) {
    if (violations.length === 0) return 'approve';
    if (violations.some(v => v.severity === 'critical')) return 'block';
    if (violations.some(v => v.severity === 'high')) return 'review';
    return 'flag';
  }
  
  /**
   * Moderates first, then runs the requested editor operation — possibly in
   * safe mode — or throws when the content must be reviewed or blocked.
   * @throws Error('CONTENT_REQUIRES_REVIEW') | Error('CONTENT_BLOCKED')
   */
  async moderateAndProcess(imagePath, operation, params) {
    const { action } = await this.moderateImage(imagePath);
    
    if (action === 'approve') {
      console.log('✅ Inhalt genehmigt, verarbeite...');
      return await this.editor[operation](params);
    }
    
    if (action === 'flag') {
      console.log('⚠️ Inhalt markiert, verarbeite mit Vorsicht...');
      // Flagged content is processed with extra restrictions.
      return await this.editor[operation]({
        ...params,
        safeMode: true,
        watermark: true
      });
    }
    
    if (action === 'review') {
      console.log('👀 Inhalt benötigt manuelle Überprüfung');
      throw new Error('CONTENT_REQUIRES_REVIEW');
    }
    
    if (action === 'block') {
      console.log('🚫 Inhalt blockiert');
      throw new Error('CONTENT_BLOCKED');
    }
    
    throw new Error('UNKNOWN_MODERATION_ACTION');
  }
}

// Usage
const moderator = new ContentModerator(editor);

try {
  const result = await moderator.moderateAndProcess(
    './zu-pruefendes-bild.jpg',
    'editText',
    {
      image: './zu-pruefendes-bild.jpg',
      prompt: 'Bearbeite das Bild'
    }
  );
  
  console.log('✅ Verarbeitung erfolgreich:', result.imageUrl);
} catch (error) {
  // Moderation outcomes are signaled via well-known error messages
  if (error.message === 'CONTENT_BLOCKED') {
    console.log('🚫 Inhalt wurde blockiert');
  } else if (error.message === 'CONTENT_REQUIRES_REVIEW') {
    console.log('👀 Inhalt benötigt manuelle Überprüfung');
    // Add to the manual review queue
  }
}

🎯 Bedingte Bearbeitung

Intelligente Bearbeitungslogik

/**
 * Rule-driven editing: analyzes an image once, then applies each registered
 * (condition, action) pair whose condition matches the analysis context.
 */
class ConditionalEditor {
  /** @param {object} editor - Client exposing `analyzeImage`. */
  constructor(editor) {
    this.editor = editor;
    this.rules = []; // evaluated in registration order
  }
  
  /** Registers a rule; both callbacks receive the shared context. */
  addRule(condition, action) {
    this.rules.push({ condition, action });
  }
  
  /**
   * Analyzes the image, evaluates every rule in order, and threads the result
   * image of each fired action into the context for the following rules.
   */
  async processWithConditions(imagePath) {
    console.log('🎯 Starte bedingte Bearbeitung...');
    
    // One up-front analysis shared by all rules.
    const { analysis } = await this.editor.analyzeImage({
      image: imagePath,
      analysisTypes: ['text', 'objects', 'quality', 'colors']
    });
    
    const context = {
      image: imagePath,
      analysis,
      metadata: {
        hasText: analysis.text?.content?.length > 0,
        objectCount: analysis.objects?.length || 0,
        quality: analysis.quality?.overall || 0,
        brightness: analysis.colors?.brightness || 0
      }
    };
    
    const applied = [];
    
    for (const { condition, action } of this.rules) {
      if (!(await condition(context))) continue;
      
      console.log(`✅ Regel erfüllt, führe Aktion aus: ${action.name}`);
      
      const result = await action(context);
      applied.push({ rule: action.name, result });
      
      // Later rules operate on the output of earlier ones.
      if (result.imageUrl) {
        context.image = result.imageUrl;
      }
    }
    
    return {
      originalImage: imagePath,
      finalImage: context.image,
      appliedRules: applied
    };
  }
}

// Define example rules
const conditionalEditor = new ConditionalEditor(editor);

// Rule 1: enhance quality when it is low
conditionalEditor.addRule(
  // Condition
  async (context) => context.metadata.quality < 0.6,
  // Action
  async (context) => {
    return await editor.enhanceImage({
      image: context.image,
      enhancements: ['quality', 'sharpness']
    });
  }
);

// Rule 2: raise brightness when the image is too dark
conditionalEditor.addRule(
  async (context) => context.metadata.brightness < 0.3,
  async (context) => {
    return await editor.editElement({
      image: context.image,
      prompt: 'Erhöhe die Helligkeit um 30%'
    });
  }
);

// Rule 3: translate text when it is not English
conditionalEditor.addRule(
  async (context) => {
    const text = context.analysis.text;
    return text?.content && text.language !== 'en';
  },
  async (context) => {
    const originalText = context.analysis.text.content;
    return await editor.editText({
      image: context.image,
      prompt: `Übersetze "${originalText}" ins Englische`
    });
  }
);

// Rule 4: add a logo when the image looks commercial (contains products)
conditionalEditor.addRule(
  async (context) => {
    const objects = context.analysis.objects || [];
    const hasProducts = objects.some(obj => 
      ['product', 'item', 'merchandise'].includes(obj.category)
    );
    return hasProducts;
  },
  async (context) => {
    return await editor.editElement({
      image: context.image,
      prompt: 'Füge dezent ein Firmenlogo in der unteren rechten Ecke hinzu'
    });
  }
);

// Usage
const result = await conditionalEditor.processWithConditions('./produkt-bild.jpg');

console.log('🎯 Bedingte Bearbeitung abgeschlossen:');
console.log(`Original: ${result.originalImage}`);
console.log(`Final: ${result.finalImage}`);
console.log(`Angewendete Regeln: ${result.appliedRules.length}`);

result.appliedRules.forEach((rule, index) => {
  console.log(`${index + 1}. ${rule.rule}`);
});

🔧 Konfigurierbare Verarbeitungspipeline

Modulare Pipeline-Architektur

/**
 * Configurable image-processing pipeline: prioritized, optionally conditional
 * steps plus before/after middleware hooks sharing one mutable context.
 */
class ProcessingPipeline {
  /** @param {object} editor - Editor client (available to step processors). */
  constructor(editor) {
    this.editor = editor;
    this.steps = [];      // kept sorted by priority, highest first
    this.middleware = []; // { before?, after? } hook objects
  }
  
  /**
   * Registers a step. Options: `enabled` (default true), `condition` (async
   * predicate over the context, default always-true), `priority` (higher runs
   * earlier, default 0).
   */
  addStep(name, processor, options = {}) {
    this.steps.push({
      name,
      processor,
      enabled: options.enabled !== false,
      condition: options.condition || (() => true),
      priority: options.priority || 0
    });
    
    // Keep execution order: highest priority first (Array#sort is stable).
    this.steps.sort((a, b) => b.priority - a.priority);
  }
  
  addMiddleware(middleware) {
    this.middleware.push(middleware);
  }
  
  /**
   * Runs the pipeline on one image. Each step may replace the working image
   * by returning `{ imageUrl }`. Step failures are recorded and the pipeline
   * continues unless the thrown error carries `critical: true`.
   */
  async process(imagePath, context = {}) {
    console.log('🔧 Starte Verarbeitungspipeline...');
    
    let activeImage = imagePath;
    const stepLog = [];
    const ctx = {
      originalImage: imagePath,
      currentImage: activeImage,
      results: stepLog,
      metadata: {},
      ...context
    };
    
    // Pre-processing middleware hooks.
    for (const mw of this.middleware) {
      if (mw.before) {
        await mw.before(ctx);
      }
    }
    
    for (const step of this.steps) {
      if (!step.enabled) {
        console.log(`⏭️ Schritt übersprungen: ${step.name} (deaktiviert)`);
        continue;
      }
      
      const shouldRun = await step.condition(ctx);
      if (!shouldRun) {
        console.log(`⏭️ Schritt übersprungen: ${step.name} (Bedingung nicht erfüllt)`);
        continue;
      }
      
      console.log(`🔄 Führe Schritt aus: ${step.name}`);
      
      try {
        const outcome = await step.processor(ctx);
        
        if (outcome.imageUrl) {
          activeImage = outcome.imageUrl;
          ctx.currentImage = activeImage;
        }
        
        stepLog.push({
          step: step.name,
          result: outcome,
          timestamp: new Date().toISOString()
        });
        
        console.log(`✅ Schritt abgeschlossen: ${step.name}`);
      } catch (error) {
        console.error(`❌ Schritt fehlgeschlagen: ${step.name}`, error.message);
        
        stepLog.push({
          step: step.name,
          error: error.message,
          timestamp: new Date().toISOString()
        });
        
        // A critical error aborts the remaining steps.
        if (error.critical) {
          break;
        }
      }
    }
    
    // Post-processing middleware hooks.
    for (const mw of this.middleware) {
      if (mw.after) {
        await mw.after(ctx);
      }
    }
    
    console.log(`🏁 Pipeline abgeschlossen: ${stepLog.length} Schritte ausgeführt`);
    
    return {
      originalImage: imagePath,
      finalImage: activeImage,
      steps: stepLog,
      metadata: ctx.metadata
    };
  }
}

// Configure the pipeline
const pipeline = new ProcessingPipeline(editor);

// Step 1: image analysis (highest priority, runs first and fills metadata)
pipeline.addStep(
  'analyze',
  async (context) => {
    const analysis = await editor.analyzeImage({
      image: context.currentImage,
      analysisTypes: ['text', 'objects', 'quality', 'colors']
    });
    
    context.metadata.analysis = analysis.analysis;
    return { analysis: analysis.analysis };
  },
  { priority: 100 }
);

// Step 2: content moderation — a critical error here aborts the pipeline
pipeline.addStep(
  'moderate',
  async (context) => {
    const moderation = await editor.analyzeImage({
      image: context.currentImage,
      analysisTypes: ['moderation']
    });
    
    context.metadata.moderation = moderation.analysis.moderation;
    
    if (moderation.analysis.moderation.adult > 0.8) {
      const error = new Error('Inhalt blockiert');
      error.critical = true;
      throw error;
    }
    
    return { moderation: moderation.analysis.moderation };
  },
  { priority: 90 }
);

// Step 3: quality enhancement (conditional on low analyzed quality)
pipeline.addStep(
  'enhance',
  async (context) => {
    return await editor.enhanceImage({
      image: context.currentImage,
      enhancements: ['quality', 'sharpness']
    });
  },
  {
    priority: 80,
    condition: (context) => {
      const quality = context.metadata.analysis?.quality?.overall || 1;
      return quality < 0.7;
    }
  }
);

// Step 4: text translation (only when English text was detected)
pipeline.addStep(
  'translate',
  async (context) => {
    const text = context.metadata.analysis.text;
    return await editor.editText({
      image: context.currentImage,
      prompt: `Übersetze "${text.content}" ins Deutsche`
    });
  },
  {
    priority: 70,
    condition: (context) => {
      const text = context.metadata.analysis?.text;
      return text?.content && text.language === 'en';
    }
  }
);

// Step 5: add a watermark
pipeline.addStep(
  'watermark',
  async (context) => {
    return await editor.editElement({
      image: context.currentImage,
      prompt: 'Füge ein dezentes Wasserzeichen hinzu'
    });
  },
  { priority: 10 }
);

// Step 6: resize to the target dimensions (lowest priority, runs last)
pipeline.addStep(
  'resize',
  async (context) => {
    return await editor.resizeImage({
      image: context.currentImage,
      width: 1024,
      height: 1024,
      mode: 'fit'
    });
  },
  { priority: 5 }
);

// Logging middleware: measures total duration and emits per-run statistics
pipeline.addMiddleware({
  before: async (context) => {
    console.log('📊 Pipeline gestartet für:', context.originalImage);
    context.startTime = Date.now();
  },
  after: async (context) => {
    const duration = Date.now() - context.startTime;
    console.log(`⏱️ Pipeline abgeschlossen in ${duration}ms`);
    
    // Collect run statistics
    const stats = {
      image: context.originalImage,
      duration,
      steps: context.results.length,
      timestamp: new Date().toISOString()
    };
    
    // Persist to a file or database as needed
    console.log('📈 Pipeline-Statistiken:', stats);
  }
});

// Run the pipeline
const result = await pipeline.process('./input-bild.jpg', {
  targetLanguage: 'de',
  addWatermark: true
});

console.log('🎉 Pipeline-Ergebnis:', result);

📚 Zusätzliche Ressourcen

🚀 Erste Schritte

Schneller Einstieg in die Qwen Image Edit API

Quickstart Guide →

💡 Beispiele

Praktische Anwendungsbeispiele und Code-Snippets

Beispiele ansehen →

📖 API-Referenz

Vollständige Dokumentation aller verfügbaren Methoden

API-Referenz →

🛠️ Fehlerbehebung

Häufige Probleme und deren Lösungen

Fehlerbehebung →