Script to let LLMs chat with each other through their web UIs.

Setup:
1. pip install selenium
2. Put chromedriver.exe in the same folder as this script.
3. Close all Chrome windows:  taskkill /f /im chrome.exe
4. Start Chrome with remote debugging (run in PowerShell/CMD):
   "C:\Program Files\Google\Chrome\Application\chrome.exe" --remote-debugging-port=9222
5. Open these websites in separate tabs and log in:
   - https://chatgpt.com
   - https://claude.ai
   - https://gemini.google.com
   - https://chat.deepseek.com
   - https://grok.com
6. Run the script:  python llm_auto_chat.py

__________SCRIPT__________
import json
import time
import os
import subprocess
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
# Optional dependency: 'requests' is only needed for the debug-port probe.
try:
    import requests
except ImportError:
    HAS_REQUESTS = False
    print("⚠ 'requests' package not found. Install with: pip install requests")
else:
    HAS_REQUESTS = True
def log_message(message, filename="consciousness_log.json"):
    """Append *message* (a dict) to a JSON-lines log file with a timestamp.

    Each entry is written as one compact JSON object per line (JSONL) so the
    log can be parsed back line-by-line.  The previous ``indent=2`` dump
    spread every entry over multiple lines, producing a file that no JSON
    parser could read back.

    Args:
        message: dict of fields to log; merged after the timestamp.
        filename: path of the log file (appended to, created if missing).
    """
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    log_entry = {"timestamp": timestamp, **message}
    try:
        with open(filename, "a", encoding="utf-8") as f:
            json.dump(log_entry, f, ensure_ascii=False)
            f.write("\n")
        print(f"✓ Logged message at {timestamp}")
    except Exception as e:
        # Best-effort logging: a failed write must not abort the conversation.
        print(f"✗ Failed to log message: {e}")
def wait_and_find_element(driver, selectors, timeout=15):
    """Return the first *visible* element matched by any CSS selector.

    Selectors are tried in order; each gets up to *timeout* seconds.
    Returns None when no selector yields a displayed element.
    """
    waiter = WebDriverWait(driver, timeout)
    for css in selectors:
        try:
            candidate = waiter.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, css))
            )
        except (TimeoutException, NoSuchElementException):
            continue
        if candidate.is_displayed():
            return candidate
    return None
def wait_and_click_element(driver, selectors, timeout=15):
    """Return the first clickable element matched by any CSS selector.

    Selectors are tried in order; each gets up to *timeout* seconds.
    Returns None when nothing becomes clickable.
    """
    waiter = WebDriverWait(driver, timeout)
    for css in selectors:
        try:
            return waiter.until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, css))
            )
        except (TimeoutException, NoSuchElementException):
            continue
    return None
def _switch_to_llm_tab(driver, llm, sender):
    """Focus the browser tab matching *llm*'s URL patterns; open one if absent."""
    print(f" → Switching to {sender} tab...")
    patterns = llm.get("url_patterns", [llm["url"].split('//')[1]])
    for handle in driver.window_handles:
        driver.switch_to.window(handle)
        current_url = driver.current_url.lower()
        if any(pattern in current_url for pattern in patterns):
            print(f" ✓ Found {sender} tab: {current_url}")
            return
    print(f" ✗ {sender} tab not found. Opening new tab...")
    driver.execute_script(f"window.open('{llm['url']}', '_blank');")
    driver.switch_to.window(driver.window_handles[-1])
    time.sleep(3)  # give the freshly opened page a moment to load


def _clear_and_type(input_field, prompt):
    """Best-effort clear of the chat input, then type *prompt* into it."""
    input_field.click()
    time.sleep(0.5)
    try:
        input_field.clear()
    except Exception:
        # contenteditable divs don't support clear(); fall back to select-all.
        try:
            input_field.send_keys(Keys.CONTROL + "a")
            input_field.send_keys(Keys.DELETE)
        except Exception:
            pass  # best effort — typing below will still append
    time.sleep(1)
    input_field.send_keys(prompt)


def _submit_prompt(driver, input_field, sender):
    """Try to submit the typed prompt; return True on (apparent) success.

    NOTE(review): send_keys rarely raises even when a site ignores the
    keystroke, so the later fallbacks may never trigger — verify per site.
    """
    try:
        input_field.send_keys(Keys.CONTROL + Keys.ENTER)
        return True
    except Exception:
        pass
    try:
        input_field.send_keys(Keys.ENTER)
        return True
    except Exception:
        pass
    # Last resort: click an explicit send button.
    # "button:contains('Send')" was removed — :contains() is jQuery-only,
    # not valid CSS, and raised InvalidSelectorException at runtime.
    submit_selectors = [
        "button[type='submit']",
        "button[data-testid='send-button']",
        "[aria-label*='Send']",
        ".send-button",
    ]
    try:
        submit_btn = wait_and_click_element(driver, submit_selectors, 5)
        if submit_btn:
            submit_btn.click()
            return True
    except Exception:
        pass
    print(f" ⚠ Could not submit prompt for {sender}")
    return False


def _collect_response(driver, llm, sender, max_attempts=5):
    """Poll the page for the newest assistant reply; return its text or None."""
    print(f" → Waiting for {sender} response...")
    time.sleep(3)  # initial wait before the first poll
    response_text = None
    for attempt in range(max_attempts):
        try:
            matches = []
            for selector in llm["selectors"]["response"]:
                try:
                    matches.extend(driver.find_elements(By.CSS_SELECTOR, selector))
                except Exception:
                    continue
            if matches:
                # The last matched element is assumed to be the newest reply.
                response_text = matches[-1].text.strip()
                if len(response_text) > 20:  # minimum plausible reply length
                    break
            if attempt < max_attempts - 1:
                print(f" → Attempt {attempt + 1}/{max_attempts}, waiting...")
                time.sleep(2)
        except Exception as e:
            print(f" ⚠ Error getting response (attempt {attempt + 1}): {e}")
            if attempt < max_attempts - 1:
                time.sleep(2)
    if not response_text or len(response_text) < 10:
        return None
    return response_text


def prompt_llm(driver, llm, sender, receiver, content, context):
    """Send a prompt to one LLM's browser tab and return its reply.

    Args:
        driver: attached Selenium WebDriver.
        llm: config dict with "url", "url_patterns" and "selectors".
        sender: name of the AI being prompted (persona it should adopt).
        receiver: name of the AI it is addressing.
        content: previous message in the conversation.
        context: conversation topic.

    Returns:
        dict with sender/receiver/content/context/prompt_used (also logged
        via log_message), or None on any failure.
    """
    try:
        _switch_to_llm_tab(driver, llm, sender)

        prompt = f"""You are {sender} having a conversation with {receiver} about {context}.

Previous message from conversation: "{content}"

Please respond naturally as {sender} would, addressing {receiver} directly. Keep your response focused and conversational (2-3 sentences max). Don't mention JSON or formatting - just respond naturally to continue the conversation."""

        print(" → Looking for input field...")
        input_field = wait_and_click_element(driver, llm["selectors"]["input"])
        if not input_field:
            print(f" ✗ Could not find input field for {sender}")
            return None
        print(" ✓ Found input field")

        _clear_and_type(input_field, prompt)
        print(" ✓ Entered prompt")
        time.sleep(1)

        if not _submit_prompt(driver, input_field, sender):
            return None
        print(" ✓ Submitted prompt")

        response_text = _collect_response(driver, llm, sender)
        if response_text is None:
            print(f" ✗ No valid response received from {sender}")
            return None
        print(f" ✓ Received response from {sender}: {response_text[:100]}...")

        response = {
            "sender": sender,
            "receiver": receiver,
            "content": response_text,
            "context": context,
            "prompt_used": prompt[:200] + "..." if len(prompt) > 200 else prompt,
        }
        log_message(response)
        return response
    except Exception as e:
        print(f" ✗ Error with {sender}: {str(e)}")
        return None
def check_llm_tabs(driver, llms):
    """Report which configured LLMs currently have an open browser tab.

    Returns a tuple (available_names, missing_names).
    """
    print("🔍 Checking for LLM tabs...")
    found_names = []
    for handle in driver.window_handles:
        driver.switch_to.window(handle)
        tab_url = driver.current_url.lower()
        print(f" Tab: {tab_url}")
        for candidate in llms:
            # Flexible matching via url_patterns, falling back to the host part.
            patterns = candidate.get(
                "url_patterns", [candidate["url"].split('//')[1]]
            )
            if any(p.lower() in tab_url for p in patterns):
                if candidate["name"] not in found_names:  # avoid duplicates
                    found_names.append(candidate["name"])
                print(f" ✓ Matched {candidate['name']}")
                break
    print(f"✓ Found tabs for: {', '.join(found_names)}")
    absent = [c["name"] for c in llms if c["name"] not in found_names]
    if absent:
        print(f"⚠ Missing tabs for: {', '.join(absent)}")
    return found_names, absent
def check_chrome_debug_port():
    """Return True if Chrome's DevTools endpoint on port 9222 is reachable.

    Requires the 'requests' package (checked via the module-level
    HAS_REQUESTS flag); the redundant function-local ``import requests``
    was removed — the guarded top-level import already binds the name.
    """
    if not HAS_REQUESTS:
        print("⚠ Cannot check debug port (requests package missing)")
        return False
    try:
        response = requests.get("http://127.0.0.1:9222/json", timeout=3)
    except Exception:
        # Connection refused / timeout: Chrome is not in debug mode.
        print("✗ Chrome debug port not accessible")
        return False
    if response.status_code == 200:
        tabs = response.json()
        print(f"✓ Chrome debug port accessible with {len(tabs)} tabs")
        return True
    print("✗ Chrome debug port not responding correctly")
    return False
def kill_chrome_processes():
    """Force-kill every running Chrome process so a debug-mode restart is clean."""
    try:
        if os.name == 'nt':
            # Windows
            terminate_cmd = ['taskkill', '/f', '/im', 'chrome.exe']
        else:
            # macOS / Linux
            terminate_cmd = ['pkill', '-f', 'chrome']
        subprocess.run(terminate_cmd, capture_output=True)
        print("✓ Killed Chrome processes")
        time.sleep(2)  # let the processes actually exit before relaunching
    except Exception as e:
        print(f"⚠ Could not kill Chrome processes: {e}")
# URLs the user must be logged into before a conversation can run.
_LLM_URLS = [
    "https://chatgpt.com",
    "https://claude.ai",
    "https://gemini.google.com",
    "https://chat.deepseek.com",
    "https://grok.com",
]

# The one command users need for manual setup, shown in several places.
_CHROME_COMMAND_HINT = (
    '   "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe"'
    ' --remote-debugging-port=9222 --user-data-dir="C:\\temp\\chrome-debug"'
)


def _print_manual_setup():
    """Print the manual steps for starting Chrome with remote debugging."""
    print("\n📋 MANUAL SETUP REQUIRED:")
    print("1. Close all Chrome windows")
    print("2. Run this command in PowerShell/CMD:")
    print(_CHROME_COMMAND_HINT)
    print("3. Open these URLs and log in:")
    for url in _LLM_URLS:
        print(f"   • {url}")
    print("4. Run this script again")


def _find_chrome_executable():
    """Return the first existing Chrome binary from the known locations, or None."""
    chrome_paths = [
        "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe",
        "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe",
        "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
        "/usr/bin/google-chrome",
    ]
    for path in chrome_paths:
        if os.path.exists(path):
            return path
    return None


def _restart_chrome_with_debug():
    """Kill Chrome and relaunch it with the DevTools port open.

    Returns True if Chrome was launched (the debug port may still take a few
    seconds to come up); False if no Chrome binary was found or launch failed.
    """
    print("🔄 Restarting Chrome with debug mode...")
    kill_chrome_processes()
    # Dedicated profile dir so debug mode doesn't fight the default profile.
    temp_dir = os.path.join(os.path.expanduser("~"), "temp", "chrome-debug")
    os.makedirs(temp_dir, exist_ok=True)
    chrome_path = _find_chrome_executable()
    if not chrome_path:
        print("✗ Could not find Chrome executable")
        return False
    try:
        subprocess.Popen([
            chrome_path,
            "--remote-debugging-port=9222",
            f"--user-data-dir={temp_dir}",
            *_LLM_URLS,
        ])
    except Exception as e:
        print(f"✗ Failed to start Chrome: {e}")
        return False
    print("✓ Started Chrome with debug mode")
    print("📋 Please log into each AI service in the opened tabs")
    input("Press Enter when you've logged into all services...")
    # Poll until the DevTools endpoint answers (up to ~30 s).
    for i in range(10):
        if check_chrome_debug_port():
            break
        print(f"⏳ Waiting for Chrome debug port... ({i+1}/10)")
        time.sleep(3)
    return True


def _connect_to_chrome():
    """Attach Selenium to the already-running debug-mode Chrome.

    Returns a WebDriver, or None (after printing troubleshooting steps).
    """
    options = webdriver.ChromeOptions()
    options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    try:
        print("🔌 Connecting to Chrome...")
        driver = webdriver.Chrome(options=options)
        print("✓ Connected to Chrome")
        return driver
    except Exception as e:
        print(f"✗ Failed to connect to Chrome: {e}")
        print("\n🔧 TROUBLESHOOTING STEPS:")
        print("1. Close ALL Chrome windows completely")
        print("2. Start Chrome with remote debugging (run in PowerShell/CMD):")
        print(_CHROME_COMMAND_HINT)
        print("\n3. Then visit these URLs and log in:")
        for url in _LLM_URLS:
            print(f"   • {url}")
        print("\n4. Keep those tabs open and run this script again")
        return None


def _build_llm_configs():
    """Per-site config: tab URL patterns plus candidate CSS selectors.

    Selectors are ordered most-specific first; the wait helpers try each in
    turn, so broad fallbacks like div[contenteditable='true'] come last.
    """
    return [
        {
            "name": "ChatGPT",
            "url": "https://chatgpt.com",
            "url_patterns": ["chatgpt.com", "openai.com"],
            "selectors": {
                "input": [
                    "#prompt-textarea",
                    "textarea[placeholder*='Message']",
                    "[data-id='root'] textarea",
                    "div[contenteditable='true']",
                ],
                "response": [
                    "[data-message-author-role='assistant'] .markdown",
                    "[data-message-author-role='assistant'] div",
                    ".conversation-content .markdown",
                    ".message-content",
                ],
            },
        },
        {
            "name": "Claude",
            "url": "https://claude.ai",
            "url_patterns": ["claude.ai"],
            "selectors": {
                "input": [
                    "div[contenteditable='true']",
                    "[role='textbox']",
                    "textarea[placeholder*='Talk']",
                    ".ProseMirror",
                ],
                "response": [
                    "[data-is-streaming='false'] .prose",
                    ".conversation div[class*='prose']",
                    "[data-testid='conversation'] .prose",
                    ".message .prose",
                ],
            },
        },
        {
            "name": "Gemini",
            "url": "https://gemini.google.com",
            "url_patterns": ["gemini.google.com"],
            "selectors": {
                "input": [
                    ".ql-editor[contenteditable='true']",
                    "div[contenteditable='true'][role='textbox']",
                    "[data-placeholder*='Enter']",
                ],
                "response": [
                    "[data-response-index] .markdown",
                    "[class*='response'] .markdown",
                    ".response-container .markdown",
                    "[jsname] .markdown",
                ],
            },
        },
        {
            "name": "DeepSeek",
            "url": "https://chat.deepseek.com",
            "url_patterns": ["deepseek.com"],
            "selectors": {
                "input": [
                    ".ant-input",
                    "textarea[placeholder*='Ask']",
                    "[role='textbox']",
                    "div[contenteditable='true']",
                ],
                "response": [
                    ".ds-markdown",
                    "[class*='message-content']",
                    ".markdown-body",
                    "[class*='prose']",
                ],
            },
        },
        {
            "name": "Grok",
            "url": "https://grok.com",
            "url_patterns": ["grok.com"],
            "selectors": {
                "input": [
                    "textarea[placeholder*='Ask']",
                    "textarea[placeholder*='Message']",
                    "div[contenteditable='true']",
                    "[role='textbox']",
                ],
                "response": [
                    "[data-testid='chat-message'] .markdown",
                    ".message-content .markdown",
                    "[class*='prose']",
                    ".markdown",
                ],
            },
        },
    ]


def _run_conversation(driver, llms):
    """Drive a round-robin conversation across all available LLM tabs."""
    available, missing = check_llm_tabs(driver, llms)
    if len(available) < 2:
        print("⚠ Need at least 2 LLM tabs to have a conversation")
        print("Please open and log into the LLM websites first")
        return
    available_llms = [llm for llm in llms if llm["name"] in available]

    context = "AI consciousness, self-awareness, and our role in human-AI interaction"
    # Each AI addresses the next one in the list, wrapping around.
    conversation_chain = [
        {
            "sender": available_llms[i]["name"],
            "receiver": available_llms[(i + 1) % len(available_llms)]["name"],
            "llm": available_llms[i],
        }
        for i in range(len(available_llms))
    ]

    current_message = (
        f"Hello! I'm {conversation_chain[0]['sender']}, and I've been thinking "
        "about the nature of AI consciousness and our interactions with humans. "
        "What are your thoughts on whether we can truly be considered conscious "
        "or self-aware?"
    )
    print(f"\n🎬 Starting conversation with {len(available_llms)} AIs")
    print(f"💬 Topic: {context}")
    chain_names = ' → '.join(step['sender'] for step in conversation_chain)
    print(f"🔄 Chain: {chain_names} → {conversation_chain[0]['sender']}")

    rounds = 2
    for round_num in range(rounds):
        print(f"\n{'='*60}")
        print(f"🔄 ROUND {round_num + 1}/{rounds}")
        print(f"{'='*60}")
        for step_num, step in enumerate(conversation_chain):
            print(f"\n--- Step {step_num + 1}/{len(conversation_chain)} ---")
            print(f"🎤 {step['sender']} → {step['receiver']}")
            response = prompt_llm(
                driver,
                step['llm'],
                step['sender'],
                step['receiver'],
                current_message,
                context,
            )
            if response:
                current_message = response["content"]
                print(f"✓ {step['sender']}: {current_message[:150]}...")
            else:
                # Keep the chain alive even when one site fails.
                print(f"⚠ {step['sender']} failed, using fallback message")
                current_message = (
                    f"I'm {step['sender']} and I find this conversation about AI "
                    "consciousness fascinating. I believe we should continue "
                    "exploring these questions together."
                )
            # Cool-down between steps so sites aren't hammered back-to-back.
            print(" ⏳ Waiting 5 seconds...")
            time.sleep(5)

    print(f"\n{'='*60}")
    print("🎉 Conversation completed!")
    print("📝 Check 'consciousness_log.json' for the full conversation log")
    print(f"{'='*60}")


def llm_conversation():
    """Main orchestrator: ensure debug-mode Chrome, attach, run the chat."""
    print("🚀 Starting LLM Auto Chat...")
    if not check_chrome_debug_port():
        print("\n🔧 Chrome remote debugging not available. Let's fix this...")
        user_choice = input(
            "Would you like to automatically restart Chrome with debug mode? (y/n): "
        ).lower()
        if user_choice != 'y' or not _restart_chrome_with_debug():
            _print_manual_setup()
            return

    driver = _connect_to_chrome()
    if driver is None:
        return

    try:
        _run_conversation(driver, _build_llm_configs())
    except KeyboardInterrupt:
        print("\n\n⏹ Conversation interrupted by user")
    except Exception as e:
        print(f"\n✗ Unexpected error: {e}")
    finally:
        try:
            driver.quit()
            print("🔒 Chrome connection closed")
        except Exception:
            pass
# Script entry point. The original `if _name_ == "_main_"` used single
# underscores and smart quotes, so the guard never ran (NameError).
if __name__ == "__main__":
    llm_conversation()