/* Tailwind layer injection points — must precede all custom rules. */
@tailwind base;
@tailwind components;
@tailwind utilities;

/* Horizontal sweep used by the skeleton-loading shimmer below. */
@keyframes shimmer {
  0% {
    background-position: 200% 0;
  }
  100% {
    background-position: -200% 0;
  }
}

/* Skeleton placeholder: light-gray gradient that sweeps left-to-right forever. */
.sk-shim {
  background: linear-gradient(90deg, #f3f4f6 25%, #e5e7eb, #f3f4f6 75%);
  background-size: 200% 100%;
  animation: shimmer 1.5s ease-in-out infinite;
}

/* Pulsing-dot animation (e.g. "typing" indicator): dots fade/shrink, then pop. */
@keyframes tP {
  0%,
  80%,
  100% {
    opacity: 0.3;
    transform: scale(0.8);
  }
  40% {
    opacity: 1;
    transform: scale(1);
  }
}

/* Drop-in entrance: element slides down 12px while fading in. */
@keyframes slideIn {
  0% {
    transform: translateY(-12px);
    opacity: 0;
  }
  100% {
    transform: translateY(0);
    opacity: 1;
  }
}

/* Decorative background layer: a wall of ML jargon rendered at ~2% black
   (#00000005, 8-digit hex alpha) so it reads as a faint texture. It fills the
   positioned ancestor, ignores clicks, and sits behind content (z-index: 0). */
.chat-bg:before {
  content: "neural network transformer attention mechanism embeddings tokenizer fine-tuning inference latency context window reinforcement learning gradient descent backpropagation semantic search vector database retrieval augmented generation chain-of-thought prompt engineering few-shot learning temperature sampling beam search cross-entropy loss function perplexity BLEU score word2vec BERT GPT diffusion model autoregressive decoder encoder self-attention multi-head normalization residual connection feedforward dropout regularization batch training epoch optimizer Adam SGD learning rate scheduler warmup cosine annealing";
  position: absolute;
  top: 0;
  right: 0;
  bottom: 0;
  left: 0;
  color: #00000005;
  font-size: 13px;
  word-spacing: 28px;
  line-height: 2.4;
  overflow: hidden;
  pointer-events: none;
  z-index: 0;
}

/* Page defaults: flush edges, light-gray canvas, smoother font rendering. */
body {
  margin: 0;
  background: #f5f6f8;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

/* Hide scrollbars across Firefox, legacy Edge/IE, and WebKit browsers
   while keeping the element scrollable. */
.scrollbar-none {
  scrollbar-width: none;
  -ms-overflow-style: none;
}

.scrollbar-none::-webkit-scrollbar {
  display: none;
}
