#!/usr/bin/env python3
"""
Twitter Scraper Backend API
Flask server to connect the web UI with the Python scraper
"""

from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
import os
import json
import csv
from datetime import datetime
import threading

# Import the scraper
from twitter_radar_scraper import TwitterRadarScraper
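# (twitter_radar_scraper.py must be importable, e.g. sitting in the same directory)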

app = Flask(__name__)
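# Enable CORS so the bundled HTML UIs can call this API from other
# origins (e.g. pages opened directly via file://)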
CORS(app)

# Global state
scraping_status = {
    'is_running': False,
    'progress': 0,
    'status': 'idle',
    'message': 'Ready',
    'posts': [],
    'error': None
}
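# Request handlers and the worker thread both touch scraping_status.
# Single dict.update() calls are effectively atomic under CPython's GIL,
# which is enough for this simple status object; add a threading.Lock
# if the shared state grows more complex.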

@app.route('/')
def index():
    """Serve the main hub"""
    return send_from_directory('.', 'twitter-scraper-index.html')

@app.route('/twitter-scraper-ui.html')
def basic_ui():
    """Serve the basic UI"""
    return send_from_directory('.', 'twitter-scraper-ui.html')

@app.route('/twitter-scraper-advanced.html')
def advanced_ui():
    """Serve the advanced analytics UI"""
    return send_from_directory('.', 'twitter-scraper-advanced.html')

@app.route('/twitter-scraper-compare.html')
def compare_ui():
    """Serve the comparison UI"""
    return send_from_directory('.', 'twitter-scraper-compare.html')

@app.route('/twitter-scraper-monitor.html')
def monitor_ui():
    """Serve the real-time monitor UI"""
    return send_from_directory('.', 'twitter-scraper-monitor.html')

@app.route('/api/status', methods=['GET'])
def get_status():
    """Get current scraping status"""
    return jsonify(scraping_status)

@app.route('/api/scrape', methods=['POST'])
def start_scraping():
    """Start scraping process"""
    global scraping_status

    if scraping_status['is_running']:
        return jsonify({'error': 'Scraping already in progress'}), 400

    data = request.get_json(silent=True) or {}
    url = data.get('url')
    method = data.get('method', 'auto')
    headless = data.get('headless', True)
    export_format = data.get('export_format', 'json')

    if not url:
        return jsonify({'error': 'URL is required'}), 400

    # Reset status
    scraping_status.update({
        'is_running': True,
        'progress': 0,
        'status': 'starting',
        'message': 'Starting scrape...',
        'posts': [],
        'error': None
    })

    # Run scraping in a background thread
    thread = threading.Thread(
        target=run_scraping,
        args=(url, method, headless, export_format),
        daemon=True  # don't keep the process alive if the server exits mid-scrape
    )
    thread.start()

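    # The request returns immediately; clients poll /api/status for progress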
    return jsonify({'message': 'Scraping started', 'status': scraping_status})

def run_scraping(url, method, headless, export_format):
    """Run the scraping process"""
    global scraping_status

    try:
        # Update status
        scraping_status.update({
            'status': 'scraping',
            'message': f'Scraping with method: {method}...',
            'progress': 20
        })

        # Create scraper instance; headless hides the browser window
        # (assuming the scraper drives a real browser)
        scraper = TwitterRadarScraper(url, headless=headless)

        # Update status
        scraping_status.update({
            'message': 'Collecting posts...',
            'progress': 40
        })

        # Scrape posts (expected to return a list of post dicts)
        posts = scraper.scrape(method=method)

        # Update status
        scraping_status.update({
            'message': 'Processing data...',
            'progress': 70
        })

        # Save results under a timestamped name so runs don't overwrite each other
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        if export_format in ['json', 'both']:
            filename = f'twitter_radar_{scraper.query_id}_{timestamp}.json'
            scraper.save_to_json(filename)

        if export_format in ['csv', 'both']:
            filename = f'twitter_radar_{scraper.query_id}_{timestamp}.csv'
            scraper.save_to_csv(filename)

        # Update status - success
        scraping_status.update({
            'is_running': False,
            'status': 'completed',
            'message': f'✅ {len(posts)} posts collected successfully!',
            'progress': 100,
            'posts': posts
        })

    except Exception as e:
        # Update status - error
        scraping_status.update({
            'is_running': False,
            'status': 'error',
            'message': f'Error: {e}',
            'progress': 0,
            'error': str(e)
        })

@app.route('/api/clear', methods=['POST'])
def clear_results():
    """Clear scraping results"""
    global scraping_status

    scraping_status.update({
        'is_running': False,
        'progress': 0,
        'status': 'idle',
        'message': 'Ready',
        'posts': [],
        'error': None
    })

    return jsonify({'message': 'Results cleared', 'status': scraping_status})

@app.route('/api/posts', methods=['GET'])
def get_posts():
    """Get scraped posts"""
    return jsonify({
        'posts': scraping_status['posts'],
        'total': len(scraping_status['posts'])
    })

@app.route('/api/export/<fmt>', methods=['GET'])
def export_posts(fmt):
    """Export posts in specified format"""
    posts = scraping_status['posts']

    if not posts:
        return jsonify({'error': 'No posts to export'}), 400

    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

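    # Note: open() writes to the process CWD, while send_from_directory('.')
    # resolves relative paths against app.root_path; run the server from this
    # file's directory so the two stay in sync.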
    if fmt == 'json':
        filename = f'twitter_export_{timestamp}.json'
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(posts, f, indent=2, ensure_ascii=False)
        return send_from_directory('.', filename, as_attachment=True)

    elif fmt == 'csv':
        filename = f'twitter_export_{timestamp}.csv'

        # Posts may have heterogeneous keys; take the union so DictWriter covers them all
        fieldnames = set()
        for post in posts:
            fieldnames.update(post.keys())
        fieldnames = sorted(fieldnames)

        with open(filename, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(posts)

        return send_from_directory('.', filename, as_attachment=True)

    else:
        return jsonify({'error': "Invalid format; use 'json' or 'csv'"}), 400

@app.route('/health', methods=['GET'])
def health_check():
    """Health check endpoint"""
    return jsonify({
        'status': 'healthy',
        'timestamp': datetime.now().isoformat(),
        'scraping_active': scraping_status['is_running']
    })

if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5002))

    print("""
╔══════════════════════════════════════════════════════════════╗
║        Twitter/X Radar Scraper - Backend API                ║
╚══════════════════════════════════════════════════════════════╝

🚀 Starting server...

📍 Access the app at:
   → http://localhost:{port}
   → http://100.75.88.8:{port} (via Tailscale)

📡 API Endpoints:
   → GET  /                    - Web UI
   → GET  /api/status          - Get scraping status
   → POST /api/scrape          - Start scraping
   → POST /api/clear           - Clear results
   → GET  /api/posts           - Get scraped posts
   → GET  /api/export/<fmt>    - Export posts (json/csv)
   → GET  /health              - Health check

💡 Press Ctrl+C to stop the server
    """.format(port=port))

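    # Flask's development server; debug=True also enables the reloader,
    # which imports this module twice (so the banner may print twice)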
    app.run(host='0.0.0.0', port=port, debug=True, threaded=True)
