#!/usr/bin/env python3
"""
Safari Scraper AI - Web scraping via Safari Developer Tools + Claude AI Analysis
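
Usage (script filename assumed for illustration):
    python3 safari_scraper.py <url>    # defaults to nubank.com.br

Requires macOS with Safari's "Allow JavaScript from Apple Events" option
enabled (Develop menu). ANTHROPIC_API_KEY and model settings are read from
src/config.py.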
"""
import subprocess
import json
import os
import re
import sys
import tempfile
from datetime import datetime
from anthropic import Anthropic

# Add parent dir to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.config import ANTHROPIC_API_KEY, CLAUDE_MODEL, PRICING, DATA_DIR, OUTPUT_DIR
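# Assumed shape of the pricing table (illustrative values; the real ones live
# in src/config.py). PRICING maps model name to USD per million tokens, e.g.:
#   PRICING = {"claude-3-haiku-20240307": {"input": 0.25, "output": 1.25}}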

class SafariScraper:
    """Scraper usando Safari via AppleScript"""

    def __init__(self):
        self.client = Anthropic(api_key=ANTHROPIC_API_KEY)

    def open_url_and_scrape(self, url: str) -> dict:
        """Abre URL no Safari e extrai todas as informações via JavaScript"""

        # Ensure the URL has a scheme
        if not url.startswith(('http://', 'https://')):
            url = f'https://{url}'

        print(f"\n{'='*60}")
        print(f"🌐 Abrindo: {url}")
        print(f"{'='*60}")

        # JavaScript to extract page data (written to a temp file below)
        js_code = '''
(function() {
    var data = {};

    // Basic page info
    data.url = window.location.href;
    data.title = document.title || '';
    data.domain = window.location.hostname;

    // Meta tags
    data.meta = {};
    var metas = document.getElementsByTagName('meta');
    for (var i = 0; i < metas.length; i++) {
        var name = metas[i].getAttribute('name') || metas[i].getAttribute('property') || '';
        var content = metas[i].getAttribute('content') || '';
        if (name && content) {
            data.meta[name] = content;
        }
    }

    // Headings (h1-h3)
    data.headings = {};
    ['h1', 'h2', 'h3'].forEach(function(tag) {
        var elements = document.getElementsByTagName(tag);
        data.headings[tag] = Array.from(elements).map(function(el) {
            return el.innerText.trim();
        }).filter(function(t) { return t.length > 0; }).slice(0, 10);
    });

    // Links
    var links = document.getElementsByTagName('a');
    data.links = Array.from(links).map(function(a) {
        return {
            text: (a.innerText || '').trim().substring(0, 100),
            href: a.href || ''
        };
    }).filter(function(l) { return l.text && l.href; }).slice(0, 50);

    // Images
    var imgs = document.getElementsByTagName('img');
    data.images = Array.from(imgs).map(function(img) {
        return {
            src: img.src || '',
            alt: img.alt || ''
        };
    }).filter(function(i) { return i.src; }).slice(0, 30);

    // Body text (truncated)
    var bodyText = document.body ? document.body.innerText : '';
    data.bodyText = bodyText.substring(0, 15000).replace(/\\s+/g, ' ').trim();

    // External scripts
    var scripts = document.getElementsByTagName('script');
    data.externalScripts = Array.from(scripts)
        .map(function(s) { return s.src; })
        .filter(function(s) { return s && s.length > 0; })
        .slice(0, 20);

    // Forms
    var forms = document.getElementsByTagName('form');
    data.forms = Array.from(forms).map(function(f) {
        return {
            action: f.action || '',
            method: f.method || 'get',
            inputs: Array.from(f.getElementsByTagName('input')).map(function(i) {
                return { type: i.type, name: i.name || '' };
            }).slice(0, 10)
        };
    }).slice(0, 5);

    // Detected technologies (basic global/DOM heuristics)
    data.technologies = [];
    if (typeof jQuery !== 'undefined') data.technologies.push('jQuery');
    if (typeof React !== 'undefined') data.technologies.push('React');
    if (typeof Vue !== 'undefined') data.technologies.push('Vue');
    if (typeof angular !== 'undefined') data.technologies.push('Angular');
    if (document.querySelector('[data-reactroot]')) data.technologies.push('React (detected)');
    if (document.querySelector('[ng-app]')) data.technologies.push('Angular (detected)');

    // Page structure
    data.structure = {
        hasNav: !!document.querySelector('nav'),
        hasHeader: !!document.querySelector('header'),
        hasFooter: !!document.querySelector('footer'),
        hasMain: !!document.querySelector('main'),
        hasSidebar: !!document.querySelector('aside'),
        totalElements: document.getElementsByTagName('*').length
    };

    return JSON.stringify(data);
})();
'''

        # Write the extraction JS to a temporary file for AppleScript to read
        with tempfile.NamedTemporaryFile(mode='w', suffix='.js', delete=False) as f:
            f.write(js_code)
            js_file = f.name

        try:
            # AppleScript: open the URL, wait for the page to load, then run the JS
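            # Reading the JS back from a file avoids having to escape quotes
            # when embedding the script in the AppleScript source.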
            applescript = f'''
            set jsFile to POSIX file "{js_file}"
            set jsCode to read jsFile

            tell application "Safari"
                activate

                if (count of windows) = 0 then
                    make new document with properties {{URL:"{url}"}}
                else
                    tell front window
                        set current tab to (make new tab with properties {{URL:"{url}"}})
                    end tell
                end if

                delay 4

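                -- Poll until the page reports readyState "complete" (up to ~15 s)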
                repeat 30 times
                    try
                        set pageState to do JavaScript "document.readyState" in current tab of front window
                        if pageState is "complete" then exit repeat
                    end try
                    delay 0.5
                end repeat

                delay 2

                set pageData to do JavaScript jsCode in current tab of front window
                return pageData
            end tell
            '''

            result = subprocess.run(
                ['osascript', '-e', applescript],
                capture_output=True,
                text=True,
                timeout=90
            )

            if result.returncode != 0:
                print(f"❌ AppleScript error: {result.stderr}")
                return {"error": result.stderr}

            # Parse the JSON string returned by the injected script
            raw_output = result.stdout.strip()
            page_data = json.loads(raw_output)
            print(f"✅ Scraping complete: {len(page_data.get('bodyText', ''))} characters extracted")

            return page_data

        except subprocess.TimeoutExpired:
            print("❌ Timed out loading the page")
            return {"error": "Timeout"}
        except json.JSONDecodeError as e:
            print(f"❌ Failed to parse JSON: {e}")
            print(f"Raw output (first 500 chars): {result.stdout[:500]}")
            return {"error": f"JSON parse error: {e}"}
        except Exception as e:
            print(f"❌ Error: {e}")
            return {"error": str(e)}
        finally:
            # The temp file is removed on success and failure alike
            if os.path.exists(js_file):
                os.unlink(js_file)

    def analyze_with_claude(self, scraped_data: dict) -> dict:
        """Analisa dados extraídos usando Claude API"""

        print(f"\n{'='*60}")
        print(f"🤖 Analisando com Claude ({CLAUDE_MODEL})")
        print(f"{'='*60}")

        # Prepare data for analysis (capped to keep the prompt small)
        data_for_analysis = {
            "url": scraped_data.get("url", ""),
            "title": scraped_data.get("title", ""),
            "domain": scraped_data.get("domain", ""),
            "meta": scraped_data.get("meta", {}),
            "headings": scraped_data.get("headings", {}),
            "links_count": len(scraped_data.get("links", [])),
            "links_sample": scraped_data.get("links", [])[:20],
            "images_count": len(scraped_data.get("images", [])),
            "bodyText": scraped_data.get("bodyText", "")[:8000],
            "technologies": scraped_data.get("technologies", []),
            "structure": scraped_data.get("structure", {}),
            "forms_count": len(scraped_data.get("forms", []))
        }

        analysis_input = json.dumps(data_for_analysis, indent=2, ensure_ascii=False)

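        # NOTE: the JSON keys in the prompt below are deliberately kept in
        # Portuguese; main() later looks up "resumo_executivo" in the result.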
        prompt = f"""Analise os dados extraídos deste website e forneça uma análise estruturada:

DADOS DO SITE:
{analysis_input}

Por favor, forneça a análise no seguinte formato JSON:

{{
    "classificacao": {{
        "categoria_principal": "ex: Fintech, E-commerce, SaaS, etc",
        "subcategorias": ["lista", "de", "subcategorias"],
        "industria": "setor da indústria",
        "tipo_site": "institucional/app/blog/marketplace/etc"
    }},
    "empresa": {{
        "nome": "nome da empresa",
        "descricao": "breve descrição",
        "proposta_valor": "qual problema resolve",
        "publico_alvo": "quem são os clientes"
    }},
    "produtos_servicos": [
        {{"nome": "produto", "descricao": "breve descrição"}}
    ],
    "tecnologia": {{
        "stack_detectado": ["tecnologias"],
        "nivel_modernidade": "1-10",
        "observacoes": "notas técnicas"
    }},
    "seo_marketing": {{
        "palavras_chave": ["principais", "keywords"],
        "meta_description": "descrição encontrada",
        "score_seo": "1-10",
        "ctas_principais": ["calls to action"]
    }},
    "analise_ux": {{
        "estrutura": "boa/media/ruim",
        "navegacao": "observações",
        "pontos_fortes": ["lista"],
        "pontos_fracos": ["lista"]
    }},
    "metricas": {{
        "total_links": numero,
        "total_imagens": numero,
        "total_formularios": numero,
        "tamanho_conteudo": "caracteres de texto"
    }},
    "resumo_executivo": "Resumo em 2-3 frases sobre o site"
}}

Responda APENAS com o JSON válido, sem texto adicional."""

        try:
            response = self.client.messages.create(
                model=CLAUDE_MODEL,
                max_tokens=2000,
                messages=[
                    {"role": "user", "content": prompt}
                ]
            )

            # Usage metrics reported by the API
            input_tokens = response.usage.input_tokens
            output_tokens = response.usage.output_tokens

            # Estimate cost
            model_pricing = PRICING.get(CLAUDE_MODEL, PRICING["claude-3-haiku-20240307"])
            input_cost = (input_tokens / 1_000_000) * model_pricing["input"]
            output_cost = (output_tokens / 1_000_000) * model_pricing["output"]
            total_cost = input_cost + output_cost
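            # Illustrative: 1,200 input + 600 output tokens at assumed Haiku
            # rates (0.25 / 1.25 USD per MTok) = $0.0003 + $0.00075 = $0.00105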

            print(f"\n📊 MÉTRICAS DE TOKENS:")
            print(f"   Input tokens:  {input_tokens:,}")
            print(f"   Output tokens: {output_tokens:,}")
            print(f"   Total tokens:  {input_tokens + output_tokens:,}")
            print(f"\n💰 CUSTO ESTIMADO:")
            print(f"   Input:  ${input_cost:.6f}")
            print(f"   Output: ${output_cost:.6f}")
            print(f"   Total:  ${total_cost:.6f}")

            # Parse the response
            response_text = response.content[0].text

            # Try to parse the response as JSON
            try:
                analysis = json.loads(response_text)
            except json.JSONDecodeError:
                # Fall back to extracting the first JSON object in the text
                json_match = re.search(r'\{[\s\S]*\}', response_text)
                if json_match:
                    analysis = json.loads(json_match.group())
                else:
                    analysis = {"raw_response": response_text}

            return {
                "analysis": analysis,
                "metrics": {
                    "input_tokens": input_tokens,
                    "output_tokens": output_tokens,
                    "total_tokens": input_tokens + output_tokens,
                    "input_cost_usd": input_cost,
                    "output_cost_usd": output_cost,
                    "total_cost_usd": total_cost,
                    "model": CLAUDE_MODEL
                }
            }

        except Exception as e:
            print(f"❌ Erro na análise: {e}")
            return {"error": str(e)}

    def save_results(self, url: str, scraped_data: dict, analysis_result: dict):
        """Salva resultados em arquivos"""

        # Build a filename slug from the URL
        domain = url.replace('https://', '').replace('http://', '').replace('/', '_').replace('.', '_')
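        # e.g., "https://nubank.com.br" -> "nubank_com_br"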
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')

        # Ensure output directories exist
        os.makedirs(DATA_DIR, exist_ok=True)
        os.makedirs(OUTPUT_DIR, exist_ok=True)

        # Save raw scraped data
        scrape_file = os.path.join(DATA_DIR, f"{domain}_{timestamp}_raw.json")
        with open(scrape_file, 'w', encoding='utf-8') as f:
            json.dump(scraped_data, f, indent=2, ensure_ascii=False)
        print(f"\n📁 Dados brutos salvos: {scrape_file}")

        # Save the analysis
        analysis_file = os.path.join(OUTPUT_DIR, f"{domain}_{timestamp}_analysis.json")
        with open(analysis_file, 'w', encoding='utf-8') as f:
            json.dump(analysis_result, f, indent=2, ensure_ascii=False)
        print(f"📁 Análise salva: {analysis_file}")

        # Write a plain-text summary report
        report_file = os.path.join(OUTPUT_DIR, f"{domain}_{timestamp}_report.txt")
        with open(report_file, 'w', encoding='utf-8') as f:
            f.write(f"{'='*60}\n")
            f.write(f"RELATÓRIO DE ANÁLISE - {url}\n")
            f.write(f"Data: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"{'='*60}\n\n")

            if "metrics" in analysis_result:
                m = analysis_result["metrics"]
                f.write("MÉTRICAS DE TOKENS:\n")
                f.write(f"  Input:  {m['input_tokens']:,} tokens\n")
                f.write(f"  Output: {m['output_tokens']:,} tokens\n")
                f.write(f"  Total:  {m['total_tokens']:,} tokens\n")
                f.write(f"  Custo:  ${m['total_cost_usd']:.6f}\n")
                f.write(f"  Modelo: {m['model']}\n\n")

            if "analysis" in analysis_result:
                f.write("ANÁLISE:\n")
                f.write(json.dumps(analysis_result["analysis"], indent=2, ensure_ascii=False))

        print(f"📁 Relatório salvo: {report_file}")

        return {
            "scrape_file": scrape_file,
            "analysis_file": analysis_file,
            "report_file": report_file
        }

    def run(self, url: str) -> dict:
        """Executa pipeline completo: scrape -> analyze -> save"""

        print(f"\n{'#'*60}")
        print(f"# SAFARI SCRAPER AI")
        print(f"# URL: {url}")
        print(f"# Modelo: {CLAUDE_MODEL}")
        print(f"{'#'*60}")

        # 1. Scrape
        scraped_data = self.open_url_and_scrape(url)

        if "error" in scraped_data:
            return {"error": scraped_data["error"]}

        # 2. Analyze
        analysis_result = self.analyze_with_claude(scraped_data)

        # 3. Save
        files = self.save_results(url, scraped_data, analysis_result)

        print(f"\n{'='*60}")
        print("✅ PROCESSO CONCLUÍDO!")
        print(f"{'='*60}")

        return {
            "url": url,
            "scraped_data": scraped_data,
            "analysis": analysis_result,
            "files": files
        }


def main():
    """Função principal"""
    import argparse

    parser = argparse.ArgumentParser(description='Safari Scraper AI - Web scraping + Claude Analysis')
    parser.add_argument('url', nargs='?', default='nubank.com.br', help='URL to analyze')
    args = parser.parse_args()

    scraper = SafariScraper()
    result = scraper.run(args.url)

    if "error" in result:
        print(f"\n❌ Erro: {result['error']}")
        sys.exit(1)

    # Print the final summary
    if "analysis" in result and "analysis" in result["analysis"]:
        analysis = result["analysis"]["analysis"]
        if isinstance(analysis, dict) and "resumo_executivo" in analysis:
            print(f"\n📝 RESUMO: {analysis['resumo_executivo']}")


if __name__ == "__main__":
    main()
