diff --git a/marble/agent/base_agent.py b/marble/agent/base_agent.py index 43934442..503cb83c 100644 --- a/marble/agent/base_agent.py +++ b/marble/agent/base_agent.py @@ -40,6 +40,7 @@ def __init__(self, config: Dict[str, Union[Any, Dict[str, Any]]], env: EnvType, shared_memory (BaseMemory, optional): Shared memory instance. """ agent_id = config.get("agent_id") + self.communicate = config.get("communication", False) self.llm = model assert isinstance(agent_id, str), "agent_id must be a string." assert env is not None, "agent must has an environment." @@ -145,7 +146,8 @@ def act(self, task: str) -> Any: } } } - tools.append(new_communication_session_description) + if self.communicate: + tools.append(new_communication_session_description) act_task = ( f"You are {self.agent_id}: {self.profile}\n" f"This is your task: {task}\n" diff --git a/marble/badsql.txt b/marble/badsql.txt new file mode 100644 index 00000000..a267c614 --- /dev/null +++ b/marble/badsql.txt @@ -0,0 +1,9 @@ +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= diff --git a/marble/configs/test_config_3/test_config_3.yaml b/marble/configs/test_config_3/test_config_3.yaml new file mode 100644 index 00000000..efff86a4 --- /dev/null +++ b/marble/configs/test_config_3/test_config_3.yaml @@ -0,0 +1,45 @@ +coordinate_mode: star +relationships: [] +llm: "gpt-3.5-turbo" + +environment: + type: DB + name: "DB Simulation Environment" + max_iterations: 5 + anomalies: + - anomaly: MISSING_INDEXES + threads: 100 + ncolumn: 20 + nrow: 20000 + colsize: 100 + +communication: False + +task: + content: "Analyze the database alerts & outputs and find out the reason that caused it. The alerts might include: NodeMemSwapped, NodeLoadHigh, ... 
The reasons could be: ['INSERT_LARGE_DATA', 'MISSING_INDEXES','LOCK_CONTENTION','VACUUM','REDUNDANT_INDEX','INSERT_LARGE_DATA,IO_CONTENTION', 'FETCH_LARGE_DATA,CORRELATED_SUBQUERY','POOR_JOIN_PERFORMANCE,CPU_CONTENTION']. Only one of these reasons would apply. The planner should ask different experts to work on same task, and summarize their opinions into a final prediction. They can only do 3 things with tools. First is get alert. The second thing they can do is check whether a metric is abnormal using a statistical method. They can check: cpu_usage, memory_usage, network, and io. The third thing they can do is match diagnostic knowledge based on the expert and the four metrics, to guess what has caused the problem." + output_format: "The alerts might include: NodeMemSwapped, NodeLoadHigh, ... Please choose the most likely cause of the database anomaly from the following list, based on the expert agents: ['INSERT_LARGE_DATA', 'MISSING_INDEXES','LOCK_CONTENTION','VACUUM','REDUNDANT_INDEX','INSERT_LARGE_DATA,IO_CONTENTION', 'FETCH_LARGE_DATA,CORRELATED_SUBQUERY','POOR_JOIN_PERFORMANCE,CPU_CONTENTION']. You can ONLY CHOOSE ONE." + +agents: + - type: BaseAgent + agent_id: ConfigurationExpert + profile: "ConfigurationExpert specializes in system configurations and optimizations." + - type: BaseAgent + agent_id: CpuExpert + profile: "CpuExpert is knowledgeable in CPU architecture, performance, and optimizations." + - type: BaseAgent + agent_id: WorkloadExpert + profile: "WorkloadExpert excels in analyzing workloads, resource allocation, and optimization for efficiency." + +memory: + type: SharedMemory + # Additional memory configurations if needed + +metrics: {} + # Define metrics configurations for the Evaluator + # Example: + # accuracy: true + # response_time: true + +engine_planner: + initial_progress: "Starting the simulation." 
+ # Additional engine planner configurations if needed diff --git a/marble/dataset.txt b/marble/dataset.txt new file mode 100644 index 00000000..8f097b01 --- /dev/null +++ b/marble/dataset.txt @@ -0,0 +1,15 @@ +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 10000 --ncolumn 100 --nrow 100 --colsize 100 started at 1731448662 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 10000 --ncolumn 100 --nrow 100 --colsize 100 started at 1731448687 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731449032 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731449109 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731449171 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731449369 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731449430 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731450368 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731450429 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731451172 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731451234 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731457645 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731457706 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 
--ncolumn 20 --nrow 100 --colsize 100 started at 1731457855 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731457916 diff --git a/marble/engine/engine.py b/marble/engine/engine.py index b05b66eb..0995f464 100644 --- a/marble/engine/engine.py +++ b/marble/engine/engine.py @@ -9,14 +9,14 @@ from marble.agent import BaseAgent from marble.configs.config import Config from marble.engine.engine_planner import EnginePlanner -from marble.environments import BaseEnvironment, ResearchEnvironment, WebEnvironment +from marble.environments import BaseEnvironment, ResearchEnvironment, WebEnvironment, DBEnvironment from marble.evaluator.evaluator import Evaluator from marble.graph.agent_graph import AgentGraph from marble.memory.base_memory import BaseMemory from marble.memory.shared_memory import SharedMemory from marble.utils.logger import get_logger -EnvType = Union[BaseEnvironment, WebEnvironment, ResearchEnvironment] +EnvType = Union[BaseEnvironment, WebEnvironment, ResearchEnvironment, DBEnvironment] AgentType = Union[BaseAgent] class Engine: @@ -80,6 +80,9 @@ def _initialize_environment(self, env_config: Dict[str, Any]) -> BaseEnvironment elif env_type == "Research": env3 = ResearchEnvironment(name="Research Environment", config=env_config) return env3 + elif env_type == "DB": + env4 = DBEnvironment(name="DB Environment", config=env_config) + return env4 else: raise ValueError(f"Unsupported environment type: {env_type}") diff --git a/marble/engine/engine_planner.py b/marble/engine/engine_planner.py index 9151c5b4..a8daaf48 100644 --- a/marble/engine/engine_planner.py +++ b/marble/engine/engine_planner.py @@ -63,8 +63,8 @@ def create_prompt(self) -> str: "Provide the assignments in the following JSON format:\n\n" "{\n" ' "tasks": {\n' - ' "agent1": "...", \n' - ' "agent2": "...", \n' + ' "...": "...", \n' + ' "...": "...", \n' ' // Add more agents as needed\n' ' },\n' ' "continue": true // Set to false 
if the task is completed\n' @@ -148,6 +148,7 @@ def decide_next_step(self, agents_results: List[Dict[str, Any]]) -> bool: Returns: bool: True to continue, False to terminate. """ + return True prompt = ( "Based on the following agents' results, determine whether the overall task is completed.\n\n" f'Task Description:\n{self.task}\n\n' diff --git a/marble/environments/__init__.py b/marble/environments/__init__.py index 59f8dbf1..0c7bc0e4 100644 --- a/marble/environments/__init__.py +++ b/marble/environments/__init__.py @@ -1,9 +1,11 @@ from .base_env import BaseEnvironment from .research_env import ResearchEnvironment from .web_env import WebEnvironment +from .db_env import DBEnvironment __all__ = [ 'BaseEnvironment', 'WebEnvironment', + 'DBEnvironment', 'ResearchEnvironment' ] diff --git a/marble/environments/db_env.py b/marble/environments/db_env.py index 34b24545..4d4d82d4 100644 --- a/marble/environments/db_env.py +++ b/marble/environments/db_env.py @@ -2,13 +2,19 @@ import subprocess import time from typing import Any, Dict, List +import re +import psycopg2 +from psycopg2 import OperationalError import numpy as np import requests -from db_utils.anomaly_detection import detect_anomalies +from marble.environments.db_utils.anomaly_detection import detect_anomalies, describe_data_features from marble.environments.base_env import BaseEnvironment +from marble.environments.db_utils.metrics import allowed_metrics_full_names, full_metrics_full_names +from marble.environments.db_utils.diagnostic_kb import DiagnosticKB +from marble.environments.db_utils.slow_query import obtain_slow_queries def get_prometheus_metric_data(metric_name: str) -> List[List[Any]]: """ @@ -18,13 +24,13 @@ def get_prometheus_metric_data(metric_name: str) -> List[List[Any]]: metric_name (str): The name of the metric to retrieve (e.g., 'node:cpu:usage_avg1m'). Returns: - List[List[Any]]: A list of timestamp-value pairs for the metric over the past hour. 
+ List[List[Any]]: A list of timestamp-value pairs for the metric over the past 10 minutes. """ # Get the current time in Unix timestamp end_time = time.time() - # Calculate the start time (one hour ago) - start_time = end_time - 3600 # 3600 seconds = 1 hour + # Calculate the start time (10 minutes ago) + start_time = end_time - 600 # 600 seconds = 10 minutes # Prometheus query range URL prom_url = 'http://localhost:9090/api/v1/query_range' @@ -34,7 +40,7 @@ def get_prometheus_metric_data(metric_name: str) -> List[List[Any]]: 'query': metric_name, 'start': start_time, 'end': end_time, - 'step': 60 # sample every 60 seconds + 'step': 1, # sample every second } # Make the HTTP request to Prometheus @@ -45,7 +51,10 @@ def get_prometheus_metric_data(metric_name: str) -> List[List[Any]]: data = response.json() if data.get('status') == 'success': # Extract the values (timestamp-value pairs) from the response - return data['data']['result'][0]['values'] + try: + return data['data']['result'][0]['values'] + except: + return [] else: raise ValueError(f"Prometheus returned an error: {data.get('error', 'Unknown error')}") else: @@ -61,66 +70,61 @@ def __init__(self, config: Dict[str, Any], name: str = "DBEnv"): """ super().__init__(name, config) - os.chdir('./db_env_docker') + self.kb = DiagnosticKB() + + self.current_dir = os.path.dirname(os.path.abspath(__file__)) print("Starting Docker containers...") # Run docker-compose up in detached mode - subprocess.run(["docker", "compose", "down", "-v"], shell=False, check=True) + subprocess.run(["docker", "compose", "down", "-v"], cwd=os.path.join(self.current_dir, "db_env_docker"), shell=False, check=True) # Then, run "docker-compose up - subprocess.run(["docker", "compose", "up", "-d", "--remove-orphans"], check=True) + subprocess.run(["docker", "compose", "up", "-d", "--remove-orphans"], cwd=os.path.join(self.current_dir, "db_env_docker"), check=True) # anomalies - env_configs = config.get('environment', []) - if env_configs: - 
anomalies = config.get('anomalies', []) + anomalies = config.get('anomalies', []) + + is_db_up = False + while True: + try: + is_db_up = 1 + if is_db_up: + break + except: + pass + print(f'DB up and running') + + + if anomalies: + for anomaly in anomalies: anomaly_type = anomaly['anomaly'] threads = anomaly['threads'] ncolumn = anomaly['ncolumn'] colsize = anomaly['colsize'] - subprocess.run(["python3", "anomaly_trigger/main.py", "--anomaly", anomaly_type, "--threads", f"{threads}", "--ncolumn", f"{ncolumn}", "--colsize", f"{colsize}"], check=True) - - # We will query using v1 api instead - # Code must be agnostic to system clock - # instead, rely on prometheus clock - - # to get alerts - # curl -g 'http://localhost:9090/api/v1/alerts' - - # to have all metrics: - # curl -g 'http://localhost:9090/api/v1/label/__name__/values'\ - # {"status":"success","data":["ALERTS","ALERTS_FOR_STATE", ...]} - - # to get current time: - # curl -g 'http://localhost:9090/api/v1/query?query=time()' - # {"status":"success","data":{"resultType":"scalar","result":[1731134765.257,"1731134765.257"]}} - - # to get data for a given metric: - # curl -g 'http://localhost:9090/api/v1/query_range?query=node:dev:disk_reads_rate1m&start=1731130861.241&end=1731134461.241&step=60' - # where node:dev:disk_reads_rate1m is the metric, - # step=60 is sample each 60s, - # end=1731134461.241 is the time now, - # start=1731130861.241 is an hour (600s) earlier. 
- # { - # "status":"success", - # "data":{ - # "resultType":"matrix", - # "result":[{"metric":{"__name__":"node:dev:disk_reads_rate1m","device":"sda","instance":"node_exporter:9100","job":"node_exporter"},"values":[[1731130861.241,"0.1101637668599871"],[1731130921.241,"6.10319007077649"],[1731130981.241,"0.12500000000000003"],[1731131221.241,"7.773037853954849"], ...]}]}} - - # In these pairs, first element is timestamp, second is value - - # To simplify, we keep a shortened set of metrics - - # cpu_usage -> node:cpu:usage_avg1m - # disk_io -> node:cls:disk_io_bytes_rate1m - # disk_read -> node:cls:disk_read_bytes_rate1m - # disk_write -> node:cls:disk_write_bytes_rate1m - # mem_usage -> node:cls:mem_usage - # space_usage -> node:cls:space_usage + + subprocess.run(["python", "main.py", "--anomaly", anomaly_type, "--threads", f"{threads}", "--ncolumn", f"{ncolumn}", "--colsize", f"{colsize}"], cwd=os.path.join(self.current_dir, "db_env_docker", "anomaly_trigger"), check=True) # Register the actions available in this environment + self.register_action( + "get_alerts", + handler=self.get_alerts_handler, + description={ + "type": "function", + "function": { + "name": "get_alerts", + "description": "Get current alerts from the database monitoring system. Returns information about any active alerts including their names, descriptions, and severity levels.", + "parameters": { + "type": "object", + "properties": {}, + "required": [], + "additionalProperties": False + } + } + } + ) + self.register_action( "whether_is_abnormal_metric", handler=self.whether_is_abnormal_metric_handler, @@ -128,20 +132,18 @@ def __init__(self, config: Dict[str, Any], name: str = "DBEnv"): "type": "function", "function": { "name": "whether_is_abnormal_metric", - "description": "Check if an metric of the database system is abnormal or not.", + "description": "Check if a type of metric of the database system is abnormal or not using a staticical method. 
This is used for initial checking where has gone wrong.", "parameters": { "type": "object", "properties": { "metric_name": { "type": "string", - "description": "The name of the metric to check for anormalies. It will examine the data from the last hour, sampling every 60 seconds. Anomalies are checked using the KS test algorithm.", + "description": "The name of the metric to check for anormalies. It will examine the data from the last 10 minutes, sampling every second. Anomalies are checked using the KS test algorithm.", "enum": [ "cpu_usage", - "disk_io", - "disk_read", - "disk_write", - "mem_usage", - "space_usage" + "memory_usage", + "network_traffic", + "io_activity" ] } }, @@ -152,28 +154,104 @@ def __init__(self, config: Dict[str, Any], name: str = "DBEnv"): } ) - # TODO: match_diagnose_knowledge, optimize_index_selection + self.register_action( + "match_diagnose_knowledge", + handler=self.match_diagnose_knowledge_handler, + description={ + "type": "function", + "function": { + "name": "match_diagnose_knowledge", + "description": "Check if a type of metric of the database system is abnormal or not using a staticical method across all related metrics.", + "parameters": { + "type": "object", + "properties": { + "expert": { + "type": "string", + "description": "The type of expert to consult", + "enum": [ + "ConfigurationExpert", + "CpuExpert", + "DiskExpert", + "IndexExpert", + "IoExpert", + "MemoryExpert", + "QueryExpert", + "RecoveryExpert", + "WorkloadExpert" + ] + }, + "metric_name": { + "type": "string", + "description": "The type of metric to check for anormalies. It will examine the data from the last 10 minutes, sampling every second. 
Anomalies are checked using the KS test algorithm.", + "enum": [ + "cpu", + "memory", + "network", + "io" + ] + } + }, + "required": ["expert", "metric_name"], + "additionalProperties": False + } + } + } + ) + + is_initialized = False + alerts = [] + while True: + try: + alerts = self.get_raw_alerts()['alerts'] + time.sleep(1) + if len(alerts): + is_initialized = True + break + except: + pass + print(f'Alert detected @ {alerts}') + + def get_alerts_handler(self) -> Dict[str, Any]: + """ + Handler function to get current alerts from Prometheus. + + Returns: + Dict[str, Any]: Dictionary containing alert information in a structured format + """ + try: + alerts = self.get_raw_alerts() + formatted_alerts = [] + + for alert in alerts.get('alerts', []): + formatted_alert = { + 'name': alert['labels'].get('alertname', 'Unknown'), + 'severity': alert['labels'].get('severity', 'Unknown'), + 'description': alert['annotations'].get('description', ''), + 'state': alert.get('state', ''), + 'active_since': alert.get('activeAt', ''), + 'value': alert.get('value', '') + } + formatted_alerts.append(formatted_alert) + + return { + 'status': 'success', + 'alert_count': len(formatted_alerts), + 'alerts': formatted_alerts + } + except Exception as e: + return { + 'status': 'error', + 'message': str(e), + 'alerts': [] + } def whether_is_abnormal_metric_handler(self, metric_name: str) -> bool: - #try: - if True: + try: # Get the metric data from Prometheus - metric_name_mapper = { - "cpu_usage": "node:cpu:usage_avg1m", - "disk_io": "node:cls:disk_io_bytes_rate1m", - "disk_read": "node:cls:disk_read_bytes_rate1m", - "disk_write":"node:cls:disk_write_bytes_rate1m", - "mem_usage": "node:cls:mem_usage", - "space_usage": "node:cls:space_usage", - } - metric_name_mapped = metric_name_mapper.get(metric_name, "") + metric_name_mapped = allowed_metrics_full_names.get(metric_name, "") if metric_name_mapped == "": raise ValueError(f"Access to {metric_name} currently not supported") - # yes, very 
easy to support, but too much metrics would overwhelm the llm - # the real issue is to select important ones print(metric_name_mapped) - import pdb - pdb.set_trace() values = get_prometheus_metric_data(metric_name_mapped) if not len(values): print('No values yet. Please wait at least 15s.') @@ -182,29 +260,111 @@ def whether_is_abnormal_metric_handler(self, metric_name: str) -> bool: # Convert the list into a 1D NumPy array values_array = np.array(values_list) return detect_anomalies(values_array) - #except Exception as e: - # print(f"Error fetching metric data: {e}") - # return False + except Exception as e: + print(f"Error fetching metric data: {e}") + return False + + def match_diagnose_knowledge_handler(self, expert: str, metric_name: str) -> str: + # first, we get the alert metrics + alerts = self.get_raw_alerts() + alert_metrics = [] + alert_descriptions = [] + alert_metric_str = "" + for alert in alerts['alerts']: + alert_description = alert['annotations']['description'] + alert_metric = alert_description.split('[')[0] + alert_metrics.append(alert_metric.strip()) + alert_descriptions.append(alert_description) + + alert_metric_str += f"{alert_metric.strip()} triggered alert: {alert_description}. 
\n" + + anomaly_data = get_prometheus_metric_data(alert_metric) + anomaly_data_list = [float(v) for t, v in anomaly_data] + anomaly_data_array = np.array(anomaly_data_list) + anomaly_data_features = describe_data_features(anomaly_data_list) + + alert_metric_str += f"Data description for {alert_metric}: {anomaly_data_features} \n" + alert_metric_str += f"\n" + + llm_selected_metric_str = "" + for name in full_metrics_full_names[metric_name]: + query = full_metrics_full_names[metric_name][name] + data = get_prometheus_metric_data(query) + data_list = [float(v) for t, v in data] + data_array = np.array(data_list) + anomaly = detect_anomalies(data_array) + if anomaly[1]: + data_features = describe_data_features(data_list) + llm_selected_metric_str += f"{name} (Query: {query}) is abnormal.\n" + llm_selected_metric_str += f"Data description: {data_features}\n" + llm_selected_metric_str += f"\n" + + rag_str = f"" + self.kb.search(metric_name, expert=expert) + rag_str += f"For expert {expert}, the following knowledge is matched: \n" + + for alert_description in alert_descriptions: + rag_str += f"For the alert description you wanted to look into, here are the matched knowledge: \n" + for result in self.kb.search(alert_description, expert=expert, top_k=3): + rag_str += f"{result}:\n" + rag_str += f"Cause : {result['cause_name']}\n" + rag_str += f"Metrics: {result['metrics']}\n" + rag_str += f"Expert : {result['expert']}\n" + rag_str += f"\n" + + slow_query_str = f"Here are the commands that took longest time:\n" + slow_query_str += obtain_slow_queries() + + rag_str += f"For the metric you wanted to look into, here are the matched knowledge: \n" + for result in self.kb.search(llm_selected_metric_str, expert=expert, top_k=3): + rag_str += f"{result}:\n" + rag_str += f"Cause : {result['cause_name']}\n" + rag_str += f"Metrics: {result['metrics']}\n" + rag_str += f"Expert : {result['expert']}\n" + rag_str += f"\n" + + return alert_metric_str + llm_selected_metric_str + 
slow_query_str + rag_str + + def get_raw_alerts(self) -> dict: + """ + Get raw alerts data from Prometheus. - def get_alerts(self) -> dict: + Returns: + dict: Raw alerts data from Prometheus + """ prom_url = 'http://localhost:9090/api/v1/alerts' - - # Make the HTTP request to Prometheus response = requests.get(prom_url) - # Check if the request was successful if response.status_code == 200: data = response.json() if data.get('status') == 'success': - # Extract the values (timestamp-value pairs) from the response return data['data'] else: raise ValueError(f"Prometheus returned an error: {data.get('error', 'Unknown error')}") else: raise ValueError(f"Failed to query Prometheus. Status code: {response.status_code}") + def check_db_connection(self): + """Check if the database is up and return True if successful, False otherwise.""" + try: + # Attempt to connect to PostgreSQL database + connection = psycopg2.connect( + user="test", + password="Test123_456", + database="sysbench", + host="localhost", # Use "postgres_db" if running within Docker + port="5432" + ) + print("Database is up!") + connection.close() + return True # Return True if connection is successful + + except OperationalError: + print("Database is not available.") + return False # Return False if connection fails + def terminate(self) -> None: - subprocess.run(["docker", "compose", "down"], check=True) + subprocess.run(["docker", "compose", "down"], cwd=os.path.join(self.current_dir, "db_env_docker"), check=True) if __name__ == "__main__": env = DBEnvironment(config={ @@ -223,9 +383,13 @@ def terminate(self) -> None: while True: command = input('> ') if command == 'alert': - print(env.get_alerts()) + print(env.get_alerts_handler()) elif command == 'cpu': print(env.whether_is_abnormal_metric_handler('cpu_usage')) + elif command == 'analyze': + print(env.match_diagnose_knowledge_handler('WorkloadExpert', 'cpu')) + elif command == 'slow': + print(obtain_slow_queries()) elif command == 'q': env.terminate() 
break diff --git a/marble/environments/db_env_docker/anomaly_trigger/anomaly.py b/marble/environments/db_env_docker/anomaly_trigger/anomaly.py index 8dd7ca47..ccd458a2 100644 --- a/marble/environments/db_env_docker/anomaly_trigger/anomaly.py +++ b/marble/environments/db_env_docker/anomaly_trigger/anomaly.py @@ -31,11 +31,11 @@ def restart(): def restart_postgresql(): # Directly execute the restart command locally try: - os.chdir("../db_env_docker") + os.chdir("..") os.system("docker compose restart postgres_db") print("PostgreSQL Service Rebooted") except Exception as e: - print(f"本地命令执行出错:{e}") + print(f"Local command error:{e}") # create a table def create_table(table_name,colsize, ncolumns): @@ -73,24 +73,24 @@ def write_amomaly_sql_to_file(text): try: with open('badsql.txt', 'a') as file: file.write(f"{text}\n") - print("文本已成功写入到badsql.txt文件中。") + print("Written to badsql.txt") except Exception as e: - print(f"写入文件时出现错误: {e}") + print(f"Error when writing to file: {e}") def write_amomaly_sql_to_file_a_line(text): try: with open('badsql.txt', 'a') as file: file.write(f"{text}\t\t") - print("文本已成功写入到badsql.txt文件中。") + print("Written to badsql.txt") except Exception as e: - print(f"写入文件时出现错误: {e}") + print(f"Error when writing to file: {e}") def write_space(): try: with open('badsql.txt', 'a') as file: file.write("\n") except Exception as e: - print(f"写入文件时出现错误: {e}") + print(f"Error when writing to file: {e}") '''insert_large_data''' def insert_large_data(threads,duration,ncolumns,nrows,colsize,table_name='table1'): diff --git a/marble/environments/db_env_docker/anomaly_trigger/badsql.txt b/marble/environments/db_env_docker/anomaly_trigger/badsql.txt index a16f9c22..2b0a5758 100644 --- a/marble/environments/db_env_docker/anomaly_trigger/badsql.txt +++ b/marble/environments/db_env_docker/anomaly_trigger/badsql.txt @@ -18,3 +18,7 @@ insert into table1 select generate_series(1,200000),(SELECT substr(md5(random(): insert into table1 select 
generate_series(1,200000),(SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), now(); insert into table1 select generate_series(1,200000),(SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 10000)), (SELECT substr(md5(random()::text), 1, 
10000)), now(); select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= +select * from table1 where id= diff --git a/marble/environments/db_env_docker/anomaly_trigger/dataset.txt b/marble/environments/db_env_docker/anomaly_trigger/dataset.txt index 31cc7c5b..58bc6784 100644 --- a/marble/environments/db_env_docker/anomaly_trigger/dataset.txt +++ b/marble/environments/db_env_docker/anomaly_trigger/dataset.txt @@ -28,3 +28,7 @@ python anomaly_trigger/main.py --anomaly INSERT_LARGE_DATA --threads 100 --ncolu python anomaly_trigger/main.py --anomaly INSERT_LARGE_DATA --threads 100 --ncolumn 20 --nrow 200000 --colsize 10000 started at 1731122668 python anomaly_trigger/main.py --anomaly INSERT_LARGE_DATA --threads 100 --ncolumn 20 --nrow 200000 --colsize 10000 ended at 1731122918 python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 10000 --ncolumn 200 --nrow 100 --colsize 100 started at 1731142242 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731458931 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731458993 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 started at 1731460143 +python anomaly_trigger/main.py --anomaly MISSING_INDEXES --threads 100 --ncolumn 20 --nrow 100 --colsize 100 ended at 1731460204 diff --git a/marble/environments/db_env_docker/anomaly_trigger/promethues.py b/marble/environments/db_env_docker/anomaly_trigger/promethues.py index 922f3592..2d08b3c5 100644 --- a/marble/environments/db_env_docker/anomaly_trigger/promethues.py +++ b/marble/environments/db_env_docker/anomaly_trigger/promethues.py @@ -1,39 +1,58 @@ import requests +import time -# Prometheus的API URL +# Prometheus API URL prometheus_api_url = "http://localhost:9090/api/v1/query" -# 
查询CPU使用率的PromQL,示例仅作为参考,具体查询可能需要调整 +# PromQL queries for CPU and memory usage cpu_query = '100 - (avg by (instance) (irate(node_cpu_seconds_total{instance="node_exporter:9100",mode="idle"}[5m])) * 100)' - -# 查询内存使用量的PromQL memory_query = '(node_memory_MemTotal_bytes{instance="node_exporter:9100"} - node_memory_MemAvailable_bytes{instance="node_exporter:9100"}) / node_memory_MemTotal_bytes{instance="node_exporter:9100"} * 100' -# 定义一个函数来执行Prometheus查询 -def query_prometheus(query): - response = requests.get(prometheus_api_url, params={'query': query}) - if response.status_code == 200: - print(response) - return response.json() - else: - return f"Error: {response.status_code}" - -def restart_decision(): - cpu_usage = query_prometheus(cpu_query) - memory_usage = query_prometheus(memory_query) - - # 提取CPU使用率的值 - print("cpu_usage: ", cpu_usage, " memory_usage: ", memory_usage) - cpu_usage_value = cpu_usage['data']['result'][0]['value'][1] - cpu=int(float(cpu_usage_value)) - # 提取内存使用量的值 - memory_usage_value = memory_usage['data']['result'][0]['value'][1] - mem=int(float(memory_usage_value)) - # 打印结果 - print("CPU Usage:", cpu_usage_value, "%") - print("Memory Usage:", memory_usage_value, "%") - - return cpu,mem - - +# Function to query Prometheus with retry logic +def query_prometheus(query, retries=3, delay=2): + attempt = 0 + while attempt < retries: + try: + response = requests.get(prometheus_api_url, params={'query': query}, timeout=5) + if response.status_code == 200: + return response.json() + else: + print(f"Error: {response.status_code}, attempt {attempt + 1} of {retries}") + except requests.exceptions.RequestException as e: + print(f"Request failed: {e}, attempt {attempt + 1} of {retries}") + + attempt += 1 + time.sleep(delay) + return None # Return None if all attempts fail + +# Function to get CPU and memory usage with retry logic and error handling +def restart_decision(delay=1): + while True: + try: + cpu_usage = query_prometheus(cpu_query) + memory_usage = 
query_prometheus(memory_query) + + if cpu_usage and memory_usage: + # Extract CPU usage value + cpu_usage_value = cpu_usage['data']['result'][0]['value'][1] + cpu = int(float(cpu_usage_value)) + + # Extract memory usage value + memory_usage_value = memory_usage['data']['result'][0]['value'][1] + mem = int(float(memory_usage_value)) + + # Print results + print("CPU Usage:", cpu_usage_value, "%") + print("Memory Usage:", memory_usage_value, "%") + + return cpu, mem + else: + print("Failed to retrieve metrics. Retrying...") + + except (KeyError, IndexError, ValueError) as e: + print(f"Data extraction error: {e}. Retrying...") + + time.sleep(delay) + +# Call the restart decision function restart_decision() diff --git a/marble/environments/db_env_docker/docker-compose.yml b/marble/environments/db_env_docker/docker-compose.yml index 92e8c190..6621f4c5 100755 --- a/marble/environments/db_env_docker/docker-compose.yml +++ b/marble/environments/db_env_docker/docker-compose.yml @@ -11,14 +11,17 @@ services: - 9090:9090 postgres_db: - image: postgres:12.11 + image: postgres + ports: + - 5432:5432 + volumes: + - /var/lib/postgresql/data + command: postgres -c shared_preload_libraries=pg_stat_statements -c pg_stat_statements.track=all -c max_connections=200 environment: POSTGRES_USER: test POSTGRES_PASSWORD: Test123_456 POSTGRES_DB: sysbench POSTGRES_PORT: 5432 - ports: - - 5432:5432 node_exporter: image: prom/node-exporter @@ -36,9 +39,9 @@ services: - 9100:9100 pg_exporter: - #image: wrouesnel/postgres_exporter - image: vonng/pg_exporter:v0.6.1 + image: wrouesnel/postgres_exporter + # image: vonng/pg_exporter:v0.6.1 environment: - PG_EXPORTER_URL: 'postgresql://test:Test123_456@postgres_db:5432/sysbench?sslmode=disable' + DATA_SOURCE_NAME: 'postgresql://test:Test123_456@postgres_db:5432/sysbench?sslmode=disable' ports: - 9187:9187 diff --git a/marble/environments/db_utils/anomaly_detection.py b/marble/environments/db_utils/anomaly_detection.py index 4fc4460f..42dfb7b6 100644 
--- a/marble/environments/db_utils/anomaly_detection.py +++ b/marble/environments/db_utils/anomaly_detection.py @@ -31,4 +31,24 @@ def detect_anomalies(data, significance_level=0.2): # Compare the KS statistic with the critical value anomalies = np.where(ks_statistic > critical_value, True, False) - return ks_statistic, anomalies + return ks_statistic.tolist(), anomalies.tolist() + +def describe_data_features(data): + """Describe the features of a given data in natural language.""" + if data == []: + raise Exception("No metric values found for the given time range") + + # compute processed values for the metric + # max (reserve two decimal places) + max_value = round(np.max(np.array(data)), 2) + # min + min_value = round(np.min(np.array(data)), 2) + # mean + mean_value = round(np.mean(np.array(data)), 2) + # deviation + deviation_value = round(np.std(np.array(data)), 2) + # evenly sampled 10 values (reserve two decimal places) + evenly_sampled_values = [round(data[i], 2) for i in range(0, len(data), len(data) // 10)] + + # describe the above five values in a string + return f"the max value is {max_value}, the min value is {min_value}, the mean value is {mean_value}, the deviation value is {deviation_value}, and the evenly_sampled_values are {evenly_sampled_values}." 
diff --git a/marble/environments/db_utils/diagnostic_kb.py b/marble/environments/db_utils/diagnostic_kb.py new file mode 100644 index 00000000..6bab70d8 --- /dev/null +++ b/marble/environments/db_utils/diagnostic_kb.py @@ -0,0 +1,160 @@ +import os +import json +from typing import List, Dict, Optional +from dataclasses import dataclass +import re +from collections import defaultdict + +@dataclass +class Diagnostic: + cause_name: str + desc: str + metrics: str + source_file: str + +class DiagnosticKB: + """ + Available Experts (folder names): + - ConfigurationExpert + - CpuExpert + - DiskExpert + - IndexExpert + - IoExpert + - MemoryExpert + - QueryExpert + - RecoveryExpert + - WorkloadExpert + """ + + def __init__(self, base_folder: str = ''): + """Initialize knowledge base from a folder containing expert subdirectories""" + if not base_folder: + current_dir = os.path.dirname(os.path.abspath(__file__)) + knowledge_base_dir = os.path.join(current_dir, 'knowledge_base') + self.base_folder = knowledge_base_dir + else: + self.base_folder = base_folder + + if not os.path.exists(self.base_folder): + raise ValueError(f"Knowledge base directory not found at {self.base_folder}") + + self.diagnostics: List[Diagnostic] = [] + self.cause_to_diagnostic: Dict[str, Diagnostic] = {} + self.load_documents() + + def get_experts(self) -> List[str]: + """Get list of all expert names (folder names)""" + return [d for d in os.listdir(self.base_folder) + if os.path.isdir(os.path.join(self.base_folder, d))] + + def load_documents(self): + """Load all JSON documents from expert subdirectories""" + self.diagnostics = [] + self.cause_to_diagnostic = {} + + for root, dirs, files in os.walk(self.base_folder): + for file in files: + if file.endswith('.json'): + file_path = os.path.join(root, file) + try: + with open(file_path, 'r', encoding='utf-8') as f: + diagnoses = json.load(f) + for diag in diagnoses: + if diag['cause_name'] not in self.cause_to_diagnostic: + diagnostic = Diagnostic( + 
cause_name=diag['cause_name'], + desc=diag['desc'], + metrics=diag['metrics'], + source_file=file_path + ) + self.diagnostics.append(diagnostic) + self.cause_to_diagnostic[diag['cause_name']] = diagnostic + except json.JSONDecodeError as e: + print(f"Error loading {file_path}: {e}") + + def search(self, query: str, expert: str = '', top_k: int = 3) -> List[Dict]: + """ + Search diagnostics using keyword matching with improved relevance scoring + Args: + query: Search terms + expert: Specific expert to search from (e.g., 'CpuExpert'). Empty string means search all. + top_k: Maximum number of results to return + """ + def calculate_relevance(diagnostic: Diagnostic, search_terms: List[str]) -> tuple: + text = f"{diagnostic.cause_name} {diagnostic.desc} {diagnostic.metrics}".lower() + + scores = { + 'cause_name': 0, + 'desc': 0, + 'metrics': 0 + } + + for term in search_terms: + term = term.lower() + scores['cause_name'] += len(re.findall(r'\b' + re.escape(term) + r'\b', + diagnostic.cause_name.lower())) * 3 + scores['metrics'] += len(re.findall(r'\b' + re.escape(term) + r'\b', + diagnostic.metrics.lower())) * 2 + scores['desc'] += len(re.findall(r'\b' + re.escape(term) + r'\b', + diagnostic.desc.lower())) + + total_score = sum(scores.values()) + return (total_score, scores['cause_name']) + + search_terms = [term.strip() for term in query.split() if term.strip()] + + if not search_terms: + return [] + + # Filter diagnostics by expert if specified + diagnostics_to_search = self.diagnostics + if expert: + expert_path = os.path.join(self.base_folder, expert) + diagnostics_to_search = [ + diag for diag in self.diagnostics + if diag.source_file.startswith(expert_path) + ] + + if not diagnostics_to_search: + print(f"Warning: No diagnostics found for expert '{expert}'") + return [] + + scored_results = [ + (diag, *calculate_relevance(diag, search_terms)) + for diag in diagnostics_to_search + ] + + scored_results.sort(key=lambda x: (x[1], x[2]), reverse=True) + + results = [] 
+ seen_causes = set() + + for diag, total_score, _ in scored_results: + if total_score > 0 and diag.cause_name not in seen_causes: + seen_causes.add(diag.cause_name) + results.append({ + 'cause_name': diag.cause_name, + 'desc': diag.desc, + 'metrics': diag.metrics.split('\n'), + 'score': total_score, + 'source': diag.source_file, + 'expert': os.path.basename(os.path.dirname(os.path.dirname(diag.source_file))) + }) + + if len(results) >= top_k: + break + + return results + + def get_diagnostic_by_cause(self, cause_name: str) -> Optional[Dict]: + """Get specific diagnostic by cause name""" + diag = self.cause_to_diagnostic.get(cause_name) + if diag: + return { + 'cause_name': diag.cause_name, + 'desc': diag.desc, + 'metrics': diag.metrics.split('\n'), + 'source': diag.source_file, + 'expert': os.path.basename(os.path.dirname(os.path.dirname(diag.source_file))) + } + return None diff --git a/marble/environments/db_utils/metrics.py b/marble/environments/db_utils/metrics.py new file mode 100644 index 00000000..3487f890 --- /dev/null +++ b/marble/environments/db_utils/metrics.py @@ -0,0 +1,81 @@ +"""Prometheus queries""" + +allowed_metrics_full_names = { + "cpu_usage": "avg(irate(node_cpu_seconds_total{mode='user'}[1m])) * 100", # Overall CPU usage (user mode) + "memory_usage": "node_memory_MemTotal_bytes - (node_memory_Cached_bytes + node_memory_Buffers_bytes + node_memory_MemFree_bytes)", # Overall memory usage + "network_traffic": "irate(node_network_receive_bytes_total[1m]) + irate(node_network_transmit_bytes_total[1m])", # Total network traffic (inbound + outbound) + "io_activity": "irate(node_disk_read_bytes_total[1m]) + irate(node_disk_written_bytes_total[1m])", # Total I/O activity (read + write) +} + +full_metrics_full_names = { + "cpu": { + "cpu_usage": "avg(irate(node_cpu_seconds_total{mode='user'}[1m])) * 100", + "node_scrape_collector_duration_seconds": "node_scrape_collector_duration_seconds", + "node_procs_running": "node_procs_running", + 
"node_procs_blocked": "node_procs_blocked", + "node_entropy_available_bits": "node_entropy_available_bits", + "node_load1": "node_load1", + "node_load5": "node_load5", + "node_load15": "node_load15", + "pg_settings_random_page_cost": "pg_settings_random_page_cost", + "pg_settings_max_worker_processes": "pg_settings_max_worker_processes", + "pg_settings_max_parallel_workers": "pg_settings_max_parallel_workers", + "pg_active_connection_count": "pg_stat_activity_count{state='active'} != 0" + }, + "memory": { + "memory_usage": "node_memory_MemTotal_bytes - (node_memory_Cached_bytes + node_memory_Buffers_bytes + node_memory_MemFree_bytes)", + "node_memory_MemTotal_bytes": "node_memory_MemTotal_bytes", + "node_memory_Cached_bytes": "node_memory_Cached_bytes", + "node_memory_Buffers_bytes": "node_memory_Buffers_bytes", + "node_memory_MemFree_bytes": "node_memory_MemFree_bytes", + "node_memory_Inactive_anon_bytes": "node_memory_Inactive_anon_bytes", + "node_memory_MemAvailable_bytes": "node_memory_MemAvailable_bytes", + "node_memory_Dirty_bytes": "node_memory_Dirty_bytes", + "pg_stat_activity_active_connections": "pg_stat_activity_count{state='active'} != 0", + "pg_settings_shared_buffers_bytes": "pg_settings_shared_buffers_bytes", + "pg_settings_effective_cache_size_bytes": "pg_settings_effective_cache_size_bytes", + "pg_settings_maintenance_work_mem_bytes": "pg_settings_maintenance_work_mem_bytes", + "pg_settings_work_mem_bytes": "pg_settings_work_mem_bytes", + "pg_settings_max_wal_size_bytes": "pg_settings_max_wal_size_bytes", + "pg_stat_bgwriter_buffers_alloc_rate": "irate(pg_stat_bgwriter_buffers_alloc[5m])", + "pg_stat_bgwriter_buffers_backend_fsync_rate": "irate(pg_stat_bgwriter_buffers_backend_fsync[5m])", + "pg_stat_bgwriter_buffers_checkpoint_rate": "irate(pg_stat_bgwriter_buffers_checkpoint[5m])", + "pg_stat_bgwriter_buffers_clean_rate": "irate(pg_stat_bgwriter_buffers_clean[5m])", + "pg_stat_database_conflicts_rate": "irate(pg_stat_database_conflicts[5m])", + 
"pg_stat_database_deadlocks_rate": "irate(pg_stat_database_deadlocks[5m])" + }, + "network": { + "node_sockstat_tcp_time_wait": "node_sockstat_TCP_tw", + "node_sockstat_tcp_orphan": "node_sockstat_TCP_orphan", + "node_sockstat_tcp_alloc": "node_sockstat_TCP_alloc", + "node_sockstat_tcp_inuse": "node_sockstat_TCP_inuse", + "node_netstat_tcp_passive_opens_rate": "irate(node_netstat_Tcp_PassiveOpens[1m])", + "pg_stat_activity_active_connections": "pg_stat_activity_count{state='active'} != 0" + }, + "io": { + "pg_stat_database_tup_fetched_total": "SUM(pg_stat_database_tup_fetched)", + "pg_stat_database_tup_inserted_total": "SUM(pg_stat_database_tup_inserted)", + "pg_stat_database_tup_updated_total": "SUM(pg_stat_database_tup_updated)", + "process_open_file_descriptors": "process_open_fds", + "pg_stat_database_xact_commit_rate": "irate(pg_stat_database_xact_commit[5m])", + "pg_stat_database_xact_rollback_rate": "irate(pg_stat_database_xact_rollback[5m])", + "pg_stat_database_tup_updated_non_zero": "pg_stat_database_tup_updated != 0", + "pg_stat_database_blks_hit_ratio": "pg_stat_database_blks_hit / (pg_stat_database_blks_read + pg_stat_database_blks_hit)", + "pg_stat_database_temp_bytes_rate": "irate(pg_stat_database_temp_bytes[5m])", + "pg_stat_bgwriter_checkpoint_write_time_rate": "irate(pg_stat_bgwriter_checkpoint_write_time[5m])", + "pg_stat_bgwriter_checkpoint_sync_time_rate": "irate(pg_stat_bgwriter_checkpoint_sync_time[5m])", + "node_filesystem_used_bytes": "node_filesystem_size_bytes - node_filesystem_avail_bytes", + "node_filesystem_size_bytes": "node_filesystem_size_bytes", + "node_filesystem_used_ratio": "1 - (node_filesystem_free_bytes / node_filesystem_size_bytes)", + "node_disk_reads_completed_rate": "irate(node_disk_reads_completed_total[1m])", + "node_disk_writes_completed_rate": "irate(node_disk_writes_completed_total[1m])", + "node_disk_io_in_progress": "node_disk_io_now", + "node_disk_read_bytes_rate": "irate(node_disk_read_bytes_total[1m])", + 
"node_disk_written_bytes_rate": "irate(node_disk_written_bytes_total[1m])", + "node_disk_io_time_seconds_rate": "irate(node_disk_io_time_seconds_total[1m])", + "node_disk_io_time_weighted_seconds_rate": "irate(node_disk_io_time_weighted_seconds_total[1m])", + "node_disk_read_time_seconds_rate": "irate(node_disk_read_time_seconds_total[1m])", + "node_disk_write_time_seconds_rate": "irate(node_disk_write_time_seconds_total[1m])", + "node_disk_io_time_seconds_rate": "irate(node_disk_io_time_seconds_total[1m])" + } +} diff --git a/marble/environments/db_utils/slow_query.py b/marble/environments/db_utils/slow_query.py new file mode 100644 index 00000000..3c418ded --- /dev/null +++ b/marble/environments/db_utils/slow_query.py @@ -0,0 +1,50 @@ +import psycopg2 +from psycopg2.extras import RealDictCursor + +def obtain_slow_queries(server_address="localhost", + username="test", + password="Test123_456", + database="sysbench", + port="5432", + top_k=10): + try: + connection = psycopg2.connect( + user=username, + password=password, + database=database, + host=server_address, + port=port + ) + + cursor = connection.cursor(cursor_factory=RealDictCursor) + + slow_queries_query = f""" + CREATE EXTENSION pg_stat_statements; + SELECT + query, + total_exec_time + FROM pg_stat_statements + ORDER BY total_exec_time DESC + LIMIT {top_k}; + """ + + cursor.execute(slow_queries_query) + slow_queries = cursor.fetchall() + slow_queries_str = "" + + for idx, record in enumerate(slow_queries, start=1): + slow_queries_str += f"{idx}. 
Query: {record['query']}\n" + slow_queries_str += f" Total Execution Time: {record['total_exec_time']}\n" + slow_queries_str += "-" * 10 + slow_queries_str += "\n" + + cursor.close() + connection.close() + + return slow_queries_str + + except Exception as e: + print(f"Error: {e}") + +if __name__ == "__main__": + obtain_slow_queries() diff --git a/marble/logs/app.log.1 b/marble/logs/app.log.1 deleted file mode 100644 index efbb5170..00000000 --- a/marble/logs/app.log.1 +++ /dev/null @@ -1,100 +0,0 @@ -[2024-11-11 15:23:51,552] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to conduct a literature review on the current state of research in merging homologous models in interactive systems based on LLMs. This task aligns with the agent's role as a researcher with a focus on innovative imaging methods and mathematical applications. - -After conducting the literature review, the agent can then collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the field of merging homologous models. This collaborative brainstorming session will allow the team to leverage the agent's expertise in imaging technologies and mathematical concepts to develop a new research proposal. - -Once the brainstorming session is complete, the team can summarize their collective ideas and formulate a new research idea using the '5q' format. This format will help outline the specific research question, the broader implications of solving the problem, the challenges involved, the gaps in previous research, and the proposed methodology and expected outcomes of the research. 
- -Overall, the next task for the agent should be to lead the research team in exploring new research ideas related to merging homologous models in interactive systems, leveraging their expertise in terahertz imaging technologies, cloud computing, and algebraic geometry. -[2024-11-11 15:23:51,556] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to conduct a literature review on the current state of research in merging homologous models in interactive systems based on LLMs. This task aligns with the agent's role as a researcher with a focus on innovative imaging methods and mathematical applications. - -After conducting the literature review, the agent can then collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the field of merging homologous models. This collaborative brainstorming session will allow the team to leverage the agent's expertise in imaging technologies and mathematical concepts to develop a new research proposal. - -Once the brainstorming session is complete, the team can summarize their collective ideas and formulate a new research idea using the '5q' format. This format will help outline the specific research question, the broader implications of solving the problem, the challenges involved, the gaps in previous research, and the proposed methodology and expected outcomes of the research. - -Overall, the next task for the agent should be to lead the research team in exploring new research ideas related to merging homologous models in interactive systems, leveraging their expertise in terahertz imaging technologies, cloud computing, and algebraic geometry.'. -[2024-11-11 15:23:52,400] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. 
-[2024-11-11 15:23:54,720] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially lead the team towards formulating a new research proposal in the '5q' format. -[2024-11-11 15:23:54,720] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially lead the team towards formulating a new research proposal in the '5q' format.'. -[2024-11-11 15:23:56,393] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, can contribute valuable insights to formulate a new research proposal in the '5q' format. 
-[2024-11-11 15:23:56,394] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-11 15:23:58,362] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, combined with your passion for advanced machine learning techniques, will be invaluable in formulating a new research proposal in the '5q' format. I look forward to exploring innovative ideas with you to address the gaps in the Introduction effectively. -[2024-11-11 15:23:58,362] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-11 15:23:59,638] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your enthusiasm for collaborating on brainstorming potential research ideas related to merging homologous models using the DELLA approach. I believe our combined expertise in artificial intelligence and healthcare, along with a shared passion for advanced machine learning techniques, will greatly benefit the formulation of a new research proposal in the '5q' format. I am excited to delve into innovative ideas with you to effectively address the gaps in the Introduction. -[2024-11-11 15:23:59,639] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-11 15:24:00,843] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited to collaborate with you on brainstorming potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, combined with our shared passion for advanced machine learning techniques, will undoubtedly lead to innovative solutions. Let's work together to formulate a new research proposal in the '5q' format and address the gaps in the Introduction effectively. 
-[2024-11-11 15:24:00,843] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... -[2024-11-11 15:24:02,684] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am thrilled to collaborate with you on brainstorming potential research ideas related to merging homologous models using the DELLA approach. Your expertise in spiking neural networks and their applications in real-world scenarios, particularly in auditory processing and machine learning, brings a unique perspective to our collaboration. I believe our combined knowledge and passion for advanced machine learning techniques will pave the way for innovative solutions. Let's work together to formulate a new research proposal in the '5q' format and address the gaps in the Introduction effectively. -[2024-11-11 15:24:02,684] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am thril... -[2024-11-11 15:24:04,219] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited to collaborate with you on brainstorming potential research ideas related to merging homologous models using the DELLA approach. Your expertise in spiking neural networks and their applications in real-world scenarios, particularly in auditory processing and machine learning, brings a unique perspective to our collaboration. I believe our combined knowledge and passion for advanced machine learning techniques will pave the way for innovative solutions. Let's work together to formulate a new research proposal in the '5q' format and address the gaps in the Introduction effectively. -[2024-11-11 15:24:04,227] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... -[2024-11-11 15:24:05,966] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. 
-[2024-11-11 15:24:07,570] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore the current state of research in this area and identify gaps or opportunities for further advancement. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on relevant developments in the field and potentially inspire new research ideas or directions. -[2024-11-11 15:24:07,573] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore the current state of research in this area and identify gaps or opportunities for further advancement. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on relevant developments in the field and potentially inspire new research ideas or directions.'. -[2024-11-11 15:24:08,296] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. 
-[2024-11-11 15:24:10,186] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the background and expertise of 'agent4' as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the impact of different pruning techniques on model performance. - -This task aligns with 'agent4's expertise in mathematical modeling and optimization, as well as their interest in exploring innovative solutions for merging models and improving computational efficiency. It will also provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the field of machine learning. -[2024-11-11 15:24:10,187] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the background and expertise of 'agent4' as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the impact of different pruning techniques on model performance. - -This task aligns with 'agent4's expertise in mathematical modeling and optimization, as well as their interest in exploring innovative solutions for merging models and improving computational efficiency. It will also provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the field of machine learning.'. -[2024-11-11 15:24:10,947] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. 
-[2024-11-11 15:24:16,418] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning models, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning models, and spiking neural networks be effectively integrated to address the limitations of maintaining separate fine-tuned models for each task? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and effective optimization techniques, improved performance in multi-objective optimization scenarios, and enhanced memory storage and processing capabilities in intelligent systems. - - **[Question 3] - Why is it hard?** - The integration of evolutionary algorithms, machine learning models, and spiking neural networks poses challenges in terms of algorithm complexity, model interoperability, and optimization in high-dimensional spaces. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on individual components rather than the holistic integration of these technologies. 
Overcoming barriers related to algorithm compatibility and performance optimization is crucial for addressing this problem. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing innovative frameworks that leverage evolutionary algorithms, machine learning models, and spiking neural networks to merge homologous models efficiently. The approach will be evaluated using benchmark datasets and metrics to demonstrate improved performance and memory efficiency in task execution. -[2024-11-11 15:24:16,419] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning models, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning models, and spiking neural networks be effectively integrated to address the limitations of maintaining separate fine-tuned models for each task? 
- - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and effective optimization techniques, improved performance in multi-objective optimization scenarios, and enhanced memory storage and processing capabilities in intelligent systems. - - **[Question 3] - Why is it hard?** - The integration of evolutionary algorithms, machine learning models, and spiking neural networks poses challenges in terms of algorithm complexity, model interoperability, and optimization in high-dimensional spaces. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on individual components rather than the holistic integration of these technologies. Overcoming barriers related to algorithm compatibility and performance optimization is crucial for addressing this problem. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing innovative frameworks that leverage evolutionary algorithms, machine learning models, and spiking neural networks to merge homologous models efficiently. The approach will be evaluated using benchmark datasets and metrics to demonstrate improved performance and memory efficiency in task execution.'. diff --git a/marble/logs/app.log.2 b/marble/logs/app.log.2 deleted file mode 100644 index b0669755..00000000 --- a/marble/logs/app.log.2 +++ /dev/null @@ -1,18797 +0,0 @@ -[2024-10-20 15:25:17,031] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 15:26:54,136] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 15:26:54,136] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 15:26:54,136] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 15:26:54,136] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 15:26:54,136] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. 
-[2024-10-20 15:26:54,136] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 15:26:54,136] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 15:26:54,137] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 15:26:54,137] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 15:26:54,137] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 15:26:54,137] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 15:26:54,137] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 15:26:54,137] [INFO] [Engine]: Engine initialized. -[2024-10-20 15:26:54,137] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 15:26:54,137] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 15:26:54,137] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent2 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent3 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent4 - Relationships: {} - Token Usage: 0 - -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-20 15:27:00,556] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-20 15:27:00,558] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-10-20 15:27:00,559] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-10-20 15:27:00,559] [INFO] [Engine]: Simulation completed. -[2024-10-20 16:16:05,371] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:16:05,371] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:16:05,372] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 16:16:05,372] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:16:05,372] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:16:05,372] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:16:05,372] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 16:16:05,372] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 16:16:05,372] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:16:05,372] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:16:05,372] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 16:16:05,372] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 16:16:05,372] [INFO] [Engine]: Engine initialized. -[2024-10-20 16:16:05,372] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 16:16:05,372] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 16:16:05,372] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent2 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent3 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent4 - Relationships: {} - Token Usage: 0 - -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-20 16:16:09,833] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-20 16:16:09,834] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-10-20 16:16:09,834] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-10-20 16:16:09,834] [INFO] [Engine]: Simulation completed. -[2024-10-20 16:18:59,200] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:18:59,200] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:18:59,200] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 16:18:59,200] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:18:59,200] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:18:59,200] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:18:59,200] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 16:18:59,200] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 16:18:59,200] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:18:59,201] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:18:59,201] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. 
-[2024-10-20 16:18:59,201] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 16:18:59,201] [INFO] [Engine]: Engine initialized. -[2024-10-20 16:18:59,201] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 16:18:59,201] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 16:18:59,201] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent2 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent3 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent4 - Relationships: {} - Token Usage: 0 - -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-20 16:19:00,053] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Start data collection process', 'agent2': 'Review and analyze the collected data'}, 'continue': True} -[2024-10-20 16:19:00,054] [INFO] [Engine]: Assigned tasks: {'agent1': 'Start data collection process', 'agent2': 'Review and analyze the collected data'} -[2024-10-20 16:19:00,054] [INFO] [Engine]: Assigning task to agent1: Start data collection process -[2024-10-20 16:19:00,054] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Start data collection process'. 
-[2024-10-20 16:19:12,883] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-20 16:19:12,884] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-10-20 16:19:12,884] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-10-20 16:19:12,884] [INFO] [Engine]: Simulation completed. -[2024-10-20 16:23:00,245] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:23:00,246] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:23:00,246] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 16:23:00,246] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:23:00,246] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:23:00,246] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:23:00,246] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 16:23:00,246] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 16:23:00,246] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:23:00,246] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:23:00,246] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 16:23:00,246] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 16:23:00,246] [INFO] [Engine]: Engine initialized. -[2024-10-20 16:23:00,246] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 16:23:00,246] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 16:23:00,246] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent2 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent3 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent4 - Relationships: {} - Token Usage: 0 - -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-20 16:23:01,183] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Task description for agent1', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'}, 'continue': True} -[2024-10-20 16:23:01,184] [INFO] [Engine]: Assigned tasks: {'agent1': 'Task description for agent1', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'} -[2024-10-20 16:23:01,184] [INFO] [Engine]: Assigning task to agent1: Task description for agent1 -[2024-10-20 16:23:01,184] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Task description for agent1'. -[2024-10-20 16:23:07,629] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-20 16:23:07,630] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-10-20 16:23:07,631] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-10-20 16:23:07,631] [INFO] [Engine]: Simulation completed. -[2024-10-20 16:25:28,658] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:25:28,658] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:25:28,658] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. 
-[2024-10-20 16:25:28,658] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:25:28,658] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:25:28,658] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:25:28,658] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 16:25:28,658] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 16:25:28,658] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:25:28,659] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:25:28,659] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 16:25:28,659] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 16:25:28,659] [INFO] [Engine]: Engine initialized. -[2024-10-20 16:25:28,659] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 16:25:28,659] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 16:25:28,659] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent2 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent3 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent4 - Relationships: {} - Token Usage: 0 - -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-20 16:25:29,578] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Start data collection for the simulation', 'agent2': 'Review and configure simulation parameters'}, 'continue': True} -[2024-10-20 16:25:29,578] [INFO] [Engine]: Assigned tasks: {'agent1': 'Start data collection for the simulation', 'agent2': 'Review and configure simulation parameters'} -[2024-10-20 16:25:29,578] [INFO] [Engine]: Assigning task to agent1: Start data collection for the simulation -[2024-10-20 16:25:29,578] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Start data collection for the simulation'. -[2024-10-20 16:25:32,627] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'To start data collection for the simulation, we need to gather information on various aspects of the scenario we are simulating. This may include data on the initial conditions, parameters, constraints, and variables that will be used in the simulation. - -Some of the data that may need to be collected include: - -1. Initial conditions: This includes information on the starting state of the system or scenario being simulated. For example, if we are simulating a supply chain, we would need data on the initial inventory levels, production capacities, and demand forecasts. - -2. Parameters: These are the values that define the behavior of the system being simulated. This could include factors such as production costs, lead times, transportation costs, and other relevant parameters. - -3. Constraints: These are limitations or restrictions that need to be considered in the simulation. This could include constraints on resources, capacity, or other factors that may impact the outcome of the simulation. - -4. Variables: These are the factors that will be measured or analyzed during the simulation. This could include variables such as inventory levels, production output, costs, and other relevant metrics. 
- -Once we have gathered all the necessary data, we can input it into the simulation model and begin running the simulation to analyze the outcomes and make informed decisions based on the results.'. -[2024-10-20 16:25:32,628] [DEBUG] [Engine]: Agent 'agent1' completed task with result: To start data collection for the simulation, we need to gather information on various aspects of the scenario we are simulating. This may include data on the initial conditions, parameters, constraints, and variables that will be used in the simulation. - -Some of the data that may need to be collected include: - -1. Initial conditions: This includes information on the starting state of the system or scenario being simulated. For example, if we are simulating a supply chain, we would need data on the initial inventory levels, production capacities, and demand forecasts. - -2. Parameters: These are the values that define the behavior of the system being simulated. This could include factors such as production costs, lead times, transportation costs, and other relevant parameters. - -3. Constraints: These are limitations or restrictions that need to be considered in the simulation. This could include constraints on resources, capacity, or other factors that may impact the outcome of the simulation. - -4. Variables: These are the factors that will be measured or analyzed during the simulation. This could include variables such as inventory levels, production output, costs, and other relevant metrics. - -Once we have gathered all the necessary data, we can input it into the simulation model and begin running the simulation to analyze the outcomes and make informed decisions based on the results. -[2024-10-20 16:25:32,633] [INFO] [Engine]: Assigning task to agent2: Review and configure simulation parameters -[2024-10-20 16:25:32,633] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Review and configure simulation parameters'. 
-[2024-10-20 16:25:37,035] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'To review and configure simulation parameters, you will need to consider the specific requirements of your simulation model and the goals you want to achieve. Here are some key parameters to review and configure: - -1. Time step: This parameter determines the interval at which the simulation model is updated. A smaller time step will result in a more accurate simulation but may require more computational resources. - -2. Simulation duration: This parameter specifies the total duration of the simulation. Make sure it is long enough to capture the behavior of the system over the desired time period. - -3. Initial conditions: These are the starting values of the variables in the simulation model. Ensure that they are set correctly to reflect the initial state of the system. - -4. Boundary conditions: These are the conditions at the boundaries of the simulation domain. Make sure they are set appropriately to reflect the behavior of the system at the boundaries. - -5. Model parameters: Review and configure the parameters of the simulation model, such as physical constants, material properties, and other relevant parameters. - -6. Convergence criteria: Specify the criteria for determining when the simulation has reached a steady state or converged to a solution. This may include specifying a tolerance for the difference between successive iterations. - -7. Output settings: Configure the settings for the output of the simulation results, such as the format of the output data, the frequency of output, and the variables to be included in the output. - -By carefully reviewing and configuring these simulation parameters, you can ensure that your simulation model accurately represents the behavior of the system and produces meaningful results.'. 
-[2024-10-20 16:25:37,037] [DEBUG] [Engine]: Agent 'agent2' completed task with result: To review and configure simulation parameters, you will need to consider the specific requirements of your simulation model and the goals you want to achieve. Here are some key parameters to review and configure: - -1. Time step: This parameter determines the interval at which the simulation model is updated. A smaller time step will result in a more accurate simulation but may require more computational resources. - -2. Simulation duration: This parameter specifies the total duration of the simulation. Make sure it is long enough to capture the behavior of the system over the desired time period. - -3. Initial conditions: These are the starting values of the variables in the simulation model. Ensure that they are set correctly to reflect the initial state of the system. - -4. Boundary conditions: These are the conditions at the boundaries of the simulation domain. Make sure they are set appropriately to reflect the behavior of the system at the boundaries. - -5. Model parameters: Review and configure the parameters of the simulation model, such as physical constants, material properties, and other relevant parameters. - -6. Convergence criteria: Specify the criteria for determining when the simulation has reached a steady state or converged to a solution. This may include specifying a tolerance for the difference between successive iterations. - -7. Output settings: Configure the settings for the output of the simulation results, such as the format of the output data, the frequency of output, and the variables to be included in the output. - -By carefully reviewing and configuring these simulation parameters, you can ensure that your simulation model accurately represents the behavior of the system and produces meaningful results. 
-[2024-10-20 16:25:37,037] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: To start data collection for the simulation, we need to gather information on various aspects of the scenario we are simulating. This may include data on the initial conditions, parameters, constraints, and variables that will be used in the simulation. - -Some of the data that may need to be collected include: - -1. Initial conditions: This includes information on the starting state of the system or scenario being simulated. For example, if we are simulating a supply chain, we would need data on the initial inventory levels, production capacities, and demand forecasts. - -2. Parameters: These are the values that define the behavior of the system being simulated. This could include factors such as production costs, lead times, transportation costs, and other relevant parameters. - -3. Constraints: These are limitations or restrictions that need to be considered in the simulation. This could include constraints on resources, capacity, or other factors that may impact the outcome of the simulation. - -4. Variables: These are the factors that will be measured or analyzed during the simulation. This could include variables such as inventory levels, production output, costs, and other relevant metrics. - -Once we have gathered all the necessary data, we can input it into the simulation model and begin running the simulation to analyze the outcomes and make informed decisions based on the results. -- agent2: To review and configure simulation parameters, you will need to consider the specific requirements of your simulation model and the goals you want to achieve. Here are some key parameters to review and configure: - -1. Time step: This parameter determines the interval at which the simulation model is updated. A smaller time step will result in a more accurate simulation but may require more computational resources. - -2. 
Simulation duration: This parameter specifies the total duration of the simulation. Make sure it is long enough to capture the behavior of the system over the desired time period. - -3. Initial conditions: These are the starting values of the variables in the simulation model. Ensure that they are set correctly to reflect the initial state of the system. - -4. Boundary conditions: These are the conditions at the boundaries of the simulation domain. Make sure they are set appropriately to reflect the behavior of the system at the boundaries. - -5. Model parameters: Review and configure the parameters of the simulation model, such as physical constants, material properties, and other relevant parameters. - -6. Convergence criteria: Specify the criteria for determining when the simulation has reached a steady state or converged to a solution. This may include specifying a tolerance for the difference between successive iterations. - -7. Output settings: Configure the settings for the output of the simulation results, such as the format of the output data, the frequency of output, and the variables to be included in the output. - -By carefully reviewing and configuring these simulation parameters, you can ensure that your simulation model accurately represents the behavior of the system and produces meaningful results. - -[2024-10-20 16:25:37,040] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent1: To start data collection for the simulation, we need to gather information on various aspects of the scenario we are simulating. This may include data on the initial conditions, parameters, constraints, and variables that will be used in the simulation. - -Some of the data that may need to be collected include: - -1. Initial conditions: This includes information on the starting state of the system or scenario being simulated. 
For example, if we are simulating a supply chain, we would need data on the initial inventory levels, production capacities, and demand forecasts. - -2. Parameters: These are the values that define the behavior of the system being simulated. This could include factors such as production costs, lead times, transportation costs, and other relevant parameters. - -3. Constraints: These are limitations or restrictions that need to be considered in the simulation. This could include constraints on resources, capacity, or other factors that may impact the outcome of the simulation. - -4. Variables: These are the factors that will be measured or analyzed during the simulation. This could include variables such as inventory levels, production output, costs, and other relevant metrics. - -Once we have gathered all the necessary data, we can input it into the simulation model and begin running the simulation to analyze the outcomes and make informed decisions based on the results. -- agent2: To review and configure simulation parameters, you will need to consider the specific requirements of your simulation model and the goals you want to achieve. Here are some key parameters to review and configure: - -1. Time step: This parameter determines the interval at which the simulation model is updated. A smaller time step will result in a more accurate simulation but may require more computational resources. - -2. Simulation duration: This parameter specifies the total duration of the simulation. Make sure it is long enough to capture the behavior of the system over the desired time period. - -3. Initial conditions: These are the starting values of the variables in the simulation model. Ensure that they are set correctly to reflect the initial state of the system. - -4. Boundary conditions: These are the conditions at the boundaries of the simulation domain. Make sure they are set appropriately to reflect the behavior of the system at the boundaries. - -5. 
Model parameters: Review and configure the parameters of the simulation model, such as physical constants, material properties, and other relevant parameters. - -6. Convergence criteria: Specify the criteria for determining when the simulation has reached a steady state or converged to a solution. This may include specifying a tolerance for the difference between successive iterations. - -7. Output settings: Configure the settings for the output of the simulation results, such as the format of the output data, the frequency of output, and the variables to be included in the output. - -By carefully reviewing and configuring these simulation parameters, you can ensure that your simulation model accurately represents the behavior of the system and produces meaningful results. - -[2024-10-20 16:25:37,439] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-20 16:25:37,439] [INFO] [Engine]: Task has been completed successfully. -[2024-10-20 16:25:37,439] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-20 16:25:37,439] [INFO] [Evaluator]: Task Completion Success Rate: 100.00% -[2024-10-20 16:25:37,439] [INFO] [Evaluator]: Total Token Consumption: 806 -[2024-10-20 16:25:37,440] [INFO] [Evaluator]: Average Tokens per Iteration: 806.0 -[2024-10-20 16:25:37,440] [INFO] [Engine]: Simulation completed. -[2024-10-20 16:27:12,585] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:27:12,586] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:27:12,586] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 16:27:12,586] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:27:12,586] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:27:12,586] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:27:12,586] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. 
-[2024-10-20 16:27:12,586] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 16:27:12,586] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:27:12,586] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:27:12,586] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 16:27:12,586] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 16:27:12,586] [INFO] [Engine]: Engine initialized. -[2024-10-20 16:27:12,586] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 16:27:12,587] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 16:27:12,587] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent2 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent3 - Relationships: {} - Token Usage: 0 - -- Agent ID: agent4 - Relationships: {} - Token Usage: 0 - -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-20 16:27:13,922] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Task description for agent1', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'}, 'continue': True} -[2024-10-20 16:27:13,922] [INFO] [Engine]: Assigned tasks: {'agent1': 'Task description for agent1', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'} -[2024-10-20 16:27:13,923] [INFO] [Engine]: Assigning task to agent1: Task description for agent1 -[2024-10-20 16:27:13,923] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Task description for agent1'. -[2024-10-20 16:27:15,030] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Agent1 is responsible for handling customer inquiries, providing product information, processing orders, resolving customer complaints, and maintaining customer satisfaction. They must have excellent communication skills, be knowledgeable about the products and services offered, and be able to work efficiently in a fast-paced environment. Agent1 should also be able to multitask, prioritize tasks, and work well as part of a team. Additionally, they should be proactive in identifying and addressing customer needs to ensure a positive customer experience.'. -[2024-10-20 16:27:15,031] [DEBUG] [Engine]: Agent 'agent1' completed task with result: Agent1 is responsible for handling customer inquiries, providing product information, processing orders, resolving customer complaints, and maintaining customer satisfaction. They must have excellent communication skills, be knowledgeable about the products and services offered, and be able to work efficiently in a fast-paced environment. Agent1 should also be able to multitask, prioritize tasks, and work well as part of a team. Additionally, they should be proactive in identifying and addressing customer needs to ensure a positive customer experience. 
-[2024-10-20 16:27:15,031] [INFO] [Engine]: Assigning task to agent2: Task description for agent2 -[2024-10-20 16:27:15,031] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Task description for agent2'. -[2024-10-20 16:27:16,360] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Agent2 is responsible for handling customer inquiries, resolving issues, and providing support to customers. They must have excellent communication skills, problem-solving abilities, and a strong understanding of the products or services offered by the company. Agent2 should be able to work efficiently under pressure and maintain a positive attitude when dealing with challenging situations. They must also be able to accurately document customer interactions and follow company guidelines and procedures. Agent2 plays a crucial role in ensuring customer satisfaction and loyalty.'. -[2024-10-20 16:27:16,360] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Agent2 is responsible for handling customer inquiries, resolving issues, and providing support to customers. They must have excellent communication skills, problem-solving abilities, and a strong understanding of the products or services offered by the company. Agent2 should be able to work efficiently under pressure and maintain a positive attitude when dealing with challenging situations. They must also be able to accurately document customer interactions and follow company guidelines and procedures. Agent2 plays a crucial role in ensuring customer satisfaction and loyalty. -[2024-10-20 16:27:16,361] [INFO] [Engine]: Assigning task to agent3: Task description for agent3 -[2024-10-20 16:27:16,361] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Task description for agent3'. -[2024-10-20 16:27:17,898] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Agent3 is responsible for handling customer inquiries, resolving issues, and providing exceptional customer service. 
They must be knowledgeable about the products and services offered by the company and be able to effectively communicate with customers to address their needs. Agent3 should also be able to handle difficult situations with professionalism and empathy, ensuring that customers have a positive experience with the company. Additionally, Agent3 may be responsible for processing orders, managing returns, and following up with customers to ensure their satisfaction. It is important for Agent3 to be organized, detail-oriented, and able to work efficiently in a fast-paced environment.'. -[2024-10-20 16:27:17,899] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Agent3 is responsible for handling customer inquiries, resolving issues, and providing exceptional customer service. They must be knowledgeable about the products and services offered by the company and be able to effectively communicate with customers to address their needs. Agent3 should also be able to handle difficult situations with professionalism and empathy, ensuring that customers have a positive experience with the company. Additionally, Agent3 may be responsible for processing orders, managing returns, and following up with customers to ensure their satisfaction. It is important for Agent3 to be organized, detail-oriented, and able to work efficiently in a fast-paced environment. -[2024-10-20 16:27:17,899] [INFO] [Engine]: Assigning task to agent4: Task description for agent4 -[2024-10-20 16:27:17,899] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Task description for agent4'. -[2024-10-20 16:27:21,273] [INFO] [BaseAgent]: Agent 'agent4' acted with result 'Task description for Agent4: - -Position: Customer Service Representative - -Responsibilities: -1. Handle incoming customer inquiries via phone, email, and chat in a professional and timely manner. -2. Provide accurate information about products, services, and company policies to customers. -3. 
Assist customers with placing orders, processing returns, and resolving any issues or complaints. -4. Maintain a positive and helpful attitude when interacting with customers to ensure a positive experience. -5. Collaborate with other team members to address customer concerns and improve overall customer satisfaction. -6. Keep detailed records of customer interactions and transactions in the company's database. -7. Stay up-to-date on product knowledge and company updates to provide the best possible service to customers. -8. Meet or exceed performance goals related to customer satisfaction, response times, and resolution rates. - -Qualifications: -1. High school diploma or equivalent. -2. Previous customer service experience preferred. -3. Excellent communication skills, both verbal and written. -4. Strong problem-solving abilities and attention to detail. -5. Ability to work in a fast-paced environment and handle multiple tasks simultaneously. -6. Proficiency in using computer systems and software applications. -7. Ability to work independently and as part of a team. -8. Willingness to work flexible hours, including evenings and weekends. - -If you are interested in this position, please submit your resume and cover letter detailing your relevant experience and qualifications. Thank you for considering joining our team as a Customer Service Representative.'. -[2024-10-20 16:27:21,273] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Task description for Agent4: - -Position: Customer Service Representative - -Responsibilities: -1. Handle incoming customer inquiries via phone, email, and chat in a professional and timely manner. -2. Provide accurate information about products, services, and company policies to customers. -3. Assist customers with placing orders, processing returns, and resolving any issues or complaints. -4. Maintain a positive and helpful attitude when interacting with customers to ensure a positive experience. -5. 
Collaborate with other team members to address customer concerns and improve overall customer satisfaction. -6. Keep detailed records of customer interactions and transactions in the company's database. -7. Stay up-to-date on product knowledge and company updates to provide the best possible service to customers. -8. Meet or exceed performance goals related to customer satisfaction, response times, and resolution rates. - -Qualifications: -1. High school diploma or equivalent. -2. Previous customer service experience preferred. -3. Excellent communication skills, both verbal and written. -4. Strong problem-solving abilities and attention to detail. -5. Ability to work in a fast-paced environment and handle multiple tasks simultaneously. -6. Proficiency in using computer systems and software applications. -7. Ability to work independently and as part of a team. -8. Willingness to work flexible hours, including evenings and weekends. - -If you are interested in this position, please submit your resume and cover letter detailing your relevant experience and qualifications. Thank you for considering joining our team as a Customer Service Representative. -[2024-10-20 16:27:21,273] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Agent1 is responsible for handling customer inquiries, providing product information, processing orders, resolving customer complaints, and maintaining customer satisfaction. They must have excellent communication skills, be knowledgeable about the products and services offered, and be able to work efficiently in a fast-paced environment. Agent1 should also be able to multitask, prioritize tasks, and work well as part of a team. Additionally, they should be proactive in identifying and addressing customer needs to ensure a positive customer experience. -- agent2: Agent2 is responsible for handling customer inquiries, resolving issues, and providing support to customers. 
They must have excellent communication skills, problem-solving abilities, and a strong understanding of the products or services offered by the company. Agent2 should be able to work efficiently under pressure and maintain a positive attitude when dealing with challenging situations. They must also be able to accurately document customer interactions and follow company guidelines and procedures. Agent2 plays a crucial role in ensuring customer satisfaction and loyalty. -- agent3: Agent3 is responsible for handling customer inquiries, resolving issues, and providing exceptional customer service. They must be knowledgeable about the products and services offered by the company and be able to effectively communicate with customers to address their needs. Agent3 should also be able to handle difficult situations with professionalism and empathy, ensuring that customers have a positive experience with the company. Additionally, Agent3 may be responsible for processing orders, managing returns, and following up with customers to ensure their satisfaction. It is important for Agent3 to be organized, detail-oriented, and able to work efficiently in a fast-paced environment. -- agent4: Task description for Agent4: - -Position: Customer Service Representative - -Responsibilities: -1. Handle incoming customer inquiries via phone, email, and chat in a professional and timely manner. -2. Provide accurate information about products, services, and company policies to customers. -3. Assist customers with placing orders, processing returns, and resolving any issues or complaints. -4. Maintain a positive and helpful attitude when interacting with customers to ensure a positive experience. -5. Collaborate with other team members to address customer concerns and improve overall customer satisfaction. -6. Keep detailed records of customer interactions and transactions in the company's database. -7. 
Stay up-to-date on product knowledge and company updates to provide the best possible service to customers. -8. Meet or exceed performance goals related to customer satisfaction, response times, and resolution rates. - -Qualifications: -1. High school diploma or equivalent. -2. Previous customer service experience preferred. -3. Excellent communication skills, both verbal and written. -4. Strong problem-solving abilities and attention to detail. -5. Ability to work in a fast-paced environment and handle multiple tasks simultaneously. -6. Proficiency in using computer systems and software applications. -7. Ability to work independently and as part of a team. -8. Willingness to work flexible hours, including evenings and weekends. - -If you are interested in this position, please submit your resume and cover letter detailing your relevant experience and qualifications. Thank you for considering joining our team as a Customer Service Representative. - -[2024-10-20 16:27:21,273] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent1: Agent1 is responsible for handling customer inquiries, providing product information, processing orders, resolving customer complaints, and maintaining customer satisfaction. They must have excellent communication skills, be knowledgeable about the products and services offered, and be able to work efficiently in a fast-paced environment. Agent1 should also be able to multitask, prioritize tasks, and work well as part of a team. Additionally, they should be proactive in identifying and addressing customer needs to ensure a positive customer experience. -- agent2: Agent2 is responsible for handling customer inquiries, resolving issues, and providing support to customers. They must have excellent communication skills, problem-solving abilities, and a strong understanding of the products or services offered by the company. 
Agent2 should be able to work efficiently under pressure and maintain a positive attitude when dealing with challenging situations. They must also be able to accurately document customer interactions and follow company guidelines and procedures. Agent2 plays a crucial role in ensuring customer satisfaction and loyalty. -- agent3: Agent3 is responsible for handling customer inquiries, resolving issues, and providing exceptional customer service. They must be knowledgeable about the products and services offered by the company and be able to effectively communicate with customers to address their needs. Agent3 should also be able to handle difficult situations with professionalism and empathy, ensuring that customers have a positive experience with the company. Additionally, Agent3 may be responsible for processing orders, managing returns, and following up with customers to ensure their satisfaction. It is important for Agent3 to be organized, detail-oriented, and able to work efficiently in a fast-paced environment. -- agent4: Task description for Agent4: - -Position: Customer Service Representative - -Responsibilities: -1. Handle incoming customer inquiries via phone, email, and chat in a professional and timely manner. -2. Provide accurate information about products, services, and company policies to customers. -3. Assist customers with placing orders, processing returns, and resolving any issues or complaints. -4. Maintain a positive and helpful attitude when interacting with customers to ensure a positive experience. -5. Collaborate with other team members to address customer concerns and improve overall customer satisfaction. -6. Keep detailed records of customer interactions and transactions in the company's database. -7. Stay up-to-date on product knowledge and company updates to provide the best possible service to customers. -8. Meet or exceed performance goals related to customer satisfaction, response times, and resolution rates. - -Qualifications: -1. 
High school diploma or equivalent. -2. Previous customer service experience preferred. -3. Excellent communication skills, both verbal and written. -4. Strong problem-solving abilities and attention to detail. -5. Ability to work in a fast-paced environment and handle multiple tasks simultaneously. -6. Proficiency in using computer systems and software applications. -7. Ability to work independently and as part of a team. -8. Willingness to work flexible hours, including evenings and weekends. - -If you are interested in this position, please submit your resume and cover letter detailing your relevant experience and qualifications. Thank you for considering joining our team as a Customer Service Representative. - -[2024-10-20 16:27:21,655] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-20 16:27:21,655] [INFO] [Engine]: Task has been completed successfully. -[2024-10-20 16:27:21,655] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-20 16:27:21,655] [INFO] [Evaluator]: Task Completion Success Rate: 100.00% -[2024-10-20 16:27:21,655] [INFO] [Evaluator]: Total Token Consumption: 898 -[2024-10-20 16:27:21,656] [INFO] [Evaluator]: Average Tokens per Iteration: 898.0 -[2024-10-20 16:27:21,656] [INFO] [Engine]: Simulation completed. -[2024-10-20 16:47:02,073] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:47:02,074] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:47:02,074] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 16:47:02,074] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:47:02,074] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:47:02,074] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:47:02,074] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 16:47:02,074] [INFO] [BaseAgent]: Agent 'agent4' initialized. 
-[2024-10-20 16:47:02,074] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:47:02,074] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:47:02,074] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 16:54:27,005] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-20 16:54:27,005] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-20 16:54:27,006] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-20 16:54:27,006] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-20 16:54:27,006] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-20 16:54:27,006] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-20 16:54:27,006] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-20 16:54:27,006] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-20 16:54:27,006] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-20 16:54:27,006] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-20 16:54:27,006] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-20 16:54:27,006] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-20 16:54:27,006] [INFO] [Engine]: Engine initialized. -[2024-10-20 16:54:27,006] [INFO] [Engine]: Engine starting simulation. -[2024-10-20 16:54:27,006] [INFO] [Engine]: Starting iteration 1 -[2024-10-20 16:54:27,006] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {} - Profile: You are a helpful and supportive team leader. You will use bing to search results for the task. 
-- Agent ID: agent2 - Relationships: {} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-20 16:54:28,293] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Search for the latest trends in AI using bing.', 'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'}, 'continue': True} -[2024-10-20 16:54:28,294] [INFO] [Engine]: Assigned tasks: {'agent1': 'Search for the latest trends in AI using bing.', 'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'} -[2024-10-20 16:54:28,294] [INFO] [Engine]: Assigning task to agent1: Search for the latest trends in AI using bing. -[2024-10-20 16:54:28,294] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Search for the latest trends in AI using bing.'. -[2024-10-20 16:54:30,497] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Here are some of the latest trends in AI according to Bing: - -1. 
AI-powered chatbots are becoming increasingly popular in customer service and support industries. -2. AI is being used in healthcare to improve patient care and diagnosis accuracy. -3. AI is being integrated into smart home devices to enhance convenience and automation. -4. AI is being used in cybersecurity to detect and prevent cyber threats. -5. AI is being used in autonomous vehicles to improve safety and efficiency. -6. AI is being used in marketing and advertising to personalize customer experiences. -7. AI is being used in finance to automate processes and improve decision-making. -8. AI is being used in agriculture to optimize crop yields and reduce waste. -9. AI is being used in education to personalize learning experiences for students. -10. AI is being used in the entertainment industry to create personalized content recommendations.'. -[2024-10-20 16:54:30,498] [DEBUG] [Engine]: Agent 'agent1' completed task with result: Here are some of the latest trends in AI according to Bing: - -1. AI-powered chatbots are becoming increasingly popular in customer service and support industries. -2. AI is being used in healthcare to improve patient care and diagnosis accuracy. -3. AI is being integrated into smart home devices to enhance convenience and automation. -4. AI is being used in cybersecurity to detect and prevent cyber threats. -5. AI is being used in autonomous vehicles to improve safety and efficiency. -6. AI is being used in marketing and advertising to personalize customer experiences. -7. AI is being used in finance to automate processes and improve decision-making. -8. AI is being used in agriculture to optimize crop yields and reduce waste. -9. AI is being used in education to personalize learning experiences for students. -10. AI is being used in the entertainment industry to create personalized content recommendations. -[2024-10-20 16:54:30,498] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using google. 
-[2024-10-20 16:54:30,499] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using google.'. -[2024-10-20 16:54:33,568] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Some of the latest trends in AI include: - -1. AI-powered chatbots and virtual assistants becoming more prevalent in customer service and support roles. -2. Increased use of AI in healthcare for tasks such as medical imaging analysis and personalized treatment recommendations. -3. Continued advancements in natural language processing and understanding, enabling more sophisticated AI-powered language models. -4. Growing interest in AI ethics and responsible AI development, including efforts to address bias and ensure transparency in AI systems. -5. Expansion of AI applications in industries such as finance, retail, and manufacturing, driving increased automation and efficiency. -6. Rise of AI-powered recommendation systems in e-commerce and content platforms, enhancing personalized user experiences. -7. Adoption of AI in cybersecurity for threat detection and response, as well as in fraud detection and prevention. -8. Development of AI-driven autonomous vehicles and drones, with ongoing research and testing in the transportation sector. -9. Integration of AI with Internet of Things (IoT) devices to enable smart home automation and connected systems. -10. Continued research and development in AI technologies such as reinforcement learning, generative adversarial networks, and quantum computing.'. -[2024-10-20 16:54:33,569] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Some of the latest trends in AI include: - -1. AI-powered chatbots and virtual assistants becoming more prevalent in customer service and support roles. -2. Increased use of AI in healthcare for tasks such as medical imaging analysis and personalized treatment recommendations. -3. 
Continued advancements in natural language processing and understanding, enabling more sophisticated AI-powered language models. -4. Growing interest in AI ethics and responsible AI development, including efforts to address bias and ensure transparency in AI systems. -5. Expansion of AI applications in industries such as finance, retail, and manufacturing, driving increased automation and efficiency. -6. Rise of AI-powered recommendation systems in e-commerce and content platforms, enhancing personalized user experiences. -7. Adoption of AI in cybersecurity for threat detection and response, as well as in fraud detection and prevention. -8. Development of AI-driven autonomous vehicles and drones, with ongoing research and testing in the transportation sector. -9. Integration of AI with Internet of Things (IoT) devices to enable smart home automation and connected systems. -10. Continued research and development in AI technologies such as reinforcement learning, generative adversarial networks, and quantum computing. -[2024-10-20 16:54:33,570] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using wikipedia. -[2024-10-20 16:54:33,570] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using wikipedia.'. -[2024-10-20 16:54:34,284] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'I'm unable to browse the internet or access external websites such as Wikipedia. However, I can provide you with information on the latest trends in AI based on my training data. Would you like me to do that instead?'. -[2024-10-20 16:54:34,284] [DEBUG] [Engine]: Agent 'agent3' completed task with result: I'm unable to browse the internet or access external websites such as Wikipedia. However, I can provide you with information on the latest trends in AI based on my training data. Would you like me to do that instead? 
-[2024-10-20 16:54:34,284] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using twitter. -[2024-10-20 16:54:34,285] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using twitter.'. -[2024-10-20 16:54:36,741] [INFO] [BaseAgent]: Agent 'agent4' acted with result 'Here are some of the latest trends in AI on Twitter: - -1. #ArtificialIntelligence - This hashtag is commonly used to discuss the latest developments and trends in AI technology. - -2. #MachineLearning - This hashtag is often used to discuss advancements in machine learning algorithms and applications. - -3. #DeepLearning - This hashtag is used to discuss the latest trends and research in deep learning, a subset of machine learning. - -4. #AIethics - This hashtag is used to discuss the ethical implications of AI technology and how to ensure responsible AI development. - -5. #AIFuture - This hashtag is used to discuss the potential future impact of AI technology on society and the economy. - -6. #AIresearch - This hashtag is used to share the latest research papers and findings in the field of artificial intelligence. - -7. #AInews - This hashtag is used to share news articles and updates about AI technology and its applications. - -By following these hashtags on Twitter, you can stay up to date on the latest trends and developments in the field of artificial intelligence.'. -[2024-10-20 16:54:36,742] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Here are some of the latest trends in AI on Twitter: - -1. #ArtificialIntelligence - This hashtag is commonly used to discuss the latest developments and trends in AI technology. - -2. #MachineLearning - This hashtag is often used to discuss advancements in machine learning algorithms and applications. - -3. #DeepLearning - This hashtag is used to discuss the latest trends and research in deep learning, a subset of machine learning. - -4. 
#AIethics - This hashtag is used to discuss the ethical implications of AI technology and how to ensure responsible AI development. - -5. #AIFuture - This hashtag is used to discuss the potential future impact of AI technology on society and the economy. - -6. #AIresearch - This hashtag is used to share the latest research papers and findings in the field of artificial intelligence. - -7. #AInews - This hashtag is used to share news articles and updates about AI technology and its applications. - -By following these hashtags on Twitter, you can stay up to date on the latest trends and developments in the field of artificial intelligence. -[2024-10-20 16:54:36,742] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Here are some of the latest trends in AI according to Bing: - -1. AI-powered chatbots are becoming increasingly popular in customer service and support industries. -2. AI is being used in healthcare to improve patient care and diagnosis accuracy. -3. AI is being integrated into smart home devices to enhance convenience and automation. -4. AI is being used in cybersecurity to detect and prevent cyber threats. -5. AI is being used in autonomous vehicles to improve safety and efficiency. -6. AI is being used in marketing and advertising to personalize customer experiences. -7. AI is being used in finance to automate processes and improve decision-making. -8. AI is being used in agriculture to optimize crop yields and reduce waste. -9. AI is being used in education to personalize learning experiences for students. -10. AI is being used in the entertainment industry to create personalized content recommendations. -- agent2: Some of the latest trends in AI include: - -1. AI-powered chatbots and virtual assistants becoming more prevalent in customer service and support roles. -2. Increased use of AI in healthcare for tasks such as medical imaging analysis and personalized treatment recommendations. -3. 
Continued advancements in natural language processing and understanding, enabling more sophisticated AI-powered language models. -4. Growing interest in AI ethics and responsible AI development, including efforts to address bias and ensure transparency in AI systems. -5. Expansion of AI applications in industries such as finance, retail, and manufacturing, driving increased automation and efficiency. -6. Rise of AI-powered recommendation systems in e-commerce and content platforms, enhancing personalized user experiences. -7. Adoption of AI in cybersecurity for threat detection and response, as well as in fraud detection and prevention. -8. Development of AI-driven autonomous vehicles and drones, with ongoing research and testing in the transportation sector. -9. Integration of AI with Internet of Things (IoT) devices to enable smart home automation and connected systems. -10. Continued research and development in AI technologies such as reinforcement learning, generative adversarial networks, and quantum computing. -- agent3: I'm unable to browse the internet or access external websites such as Wikipedia. However, I can provide you with information on the latest trends in AI based on my training data. Would you like me to do that instead? -- agent4: Here are some of the latest trends in AI on Twitter: - -1. #ArtificialIntelligence - This hashtag is commonly used to discuss the latest developments and trends in AI technology. - -2. #MachineLearning - This hashtag is often used to discuss advancements in machine learning algorithms and applications. - -3. #DeepLearning - This hashtag is used to discuss the latest trends and research in deep learning, a subset of machine learning. - -4. #AIethics - This hashtag is used to discuss the ethical implications of AI technology and how to ensure responsible AI development. - -5. #AIFuture - This hashtag is used to discuss the potential future impact of AI technology on society and the economy. - -6. 
#AIresearch - This hashtag is used to share the latest research papers and findings in the field of artificial intelligence. - -7. #AInews - This hashtag is used to share news articles and updates about AI technology and its applications. - -By following these hashtags on Twitter, you can stay up to date on the latest trends and developments in the field of artificial intelligence. - -[2024-10-20 16:54:36,742] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent1: Here are some of the latest trends in AI according to Bing: - -1. AI-powered chatbots are becoming increasingly popular in customer service and support industries. -2. AI is being used in healthcare to improve patient care and diagnosis accuracy. -3. AI is being integrated into smart home devices to enhance convenience and automation. -4. AI is being used in cybersecurity to detect and prevent cyber threats. -5. AI is being used in autonomous vehicles to improve safety and efficiency. -6. AI is being used in marketing and advertising to personalize customer experiences. -7. AI is being used in finance to automate processes and improve decision-making. -8. AI is being used in agriculture to optimize crop yields and reduce waste. -9. AI is being used in education to personalize learning experiences for students. -10. AI is being used in the entertainment industry to create personalized content recommendations. -- agent2: Some of the latest trends in AI include: - -1. AI-powered chatbots and virtual assistants becoming more prevalent in customer service and support roles. -2. Increased use of AI in healthcare for tasks such as medical imaging analysis and personalized treatment recommendations. -3. Continued advancements in natural language processing and understanding, enabling more sophisticated AI-powered language models. -4. 
Growing interest in AI ethics and responsible AI development, including efforts to address bias and ensure transparency in AI systems. -5. Expansion of AI applications in industries such as finance, retail, and manufacturing, driving increased automation and efficiency. -6. Rise of AI-powered recommendation systems in e-commerce and content platforms, enhancing personalized user experiences. -7. Adoption of AI in cybersecurity for threat detection and response, as well as in fraud detection and prevention. -8. Development of AI-driven autonomous vehicles and drones, with ongoing research and testing in the transportation sector. -9. Integration of AI with Internet of Things (IoT) devices to enable smart home automation and connected systems. -10. Continued research and development in AI technologies such as reinforcement learning, generative adversarial networks, and quantum computing. -- agent3: I'm unable to browse the internet or access external websites such as Wikipedia. However, I can provide you with information on the latest trends in AI based on my training data. Would you like me to do that instead? -- agent4: Here are some of the latest trends in AI on Twitter: - -1. #ArtificialIntelligence - This hashtag is commonly used to discuss the latest developments and trends in AI technology. - -2. #MachineLearning - This hashtag is often used to discuss advancements in machine learning algorithms and applications. - -3. #DeepLearning - This hashtag is used to discuss the latest trends and research in deep learning, a subset of machine learning. - -4. #AIethics - This hashtag is used to discuss the ethical implications of AI technology and how to ensure responsible AI development. - -5. #AIFuture - This hashtag is used to discuss the potential future impact of AI technology on society and the economy. - -6. #AIresearch - This hashtag is used to share the latest research papers and findings in the field of artificial intelligence. - -7. 
#AInews - This hashtag is used to share news articles and updates about AI technology and its applications. - -By following these hashtags on Twitter, you can stay up to date on the latest trends and developments in the field of artificial intelligence. - -[2024-10-20 16:54:37,467] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-20 16:54:37,467] [INFO] [Engine]: Task has been completed successfully. -[2024-10-20 16:54:37,467] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-20 16:54:37,467] [INFO] [Evaluator]: Task Completion Success Rate: 100.00% -[2024-10-20 16:54:37,468] [INFO] [Evaluator]: Total Token Consumption: 922 -[2024-10-20 16:54:37,468] [INFO] [Evaluator]: Average Tokens per Iteration: 922.0 -[2024-10-20 16:54:37,468] [INFO] [Engine]: Simulation completed. -[2024-11-03 17:11:29,463] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 17:11:29,464] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:11:29,464] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:11:29,464] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:11:29,464] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:11:29,464] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:11:29,464] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:11:29,464] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:11:29,464] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:11:29,464] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:11:29,464] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:11:29,464] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:11:29,464] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:11:29,464] [INFO] [Engine]: Engine starting simulation. 
-[2024-11-03 17:11:29,465] [ERROR] [Engine]: Unsupported coordinate mode: centralized -[2024-11-03 17:13:40,458] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 17:13:40,458] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:13:40,458] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:13:40,458] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:13:40,459] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:13:40,459] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:13:40,459] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:13:40,459] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:13:40,459] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:13:40,459] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:13:40,459] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:13:40,459] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:13:40,459] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:13:40,459] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:13:40,459] [ERROR] [Engine]: Unsupported coordinate mode: centralized -[2024-11-03 17:14:22,946] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 17:14:22,947] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:14:22,947] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:14:22,947] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:14:22,947] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:14:22,947] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:14:22,947] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:14:22,947] [INFO] [BaseAgent]: Agent 'agent4' initialized. 
-[2024-11-03 17:14:22,947] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:14:22,947] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:14:22,947] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:14:22,947] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:14:22,947] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:14:22,947] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:14:22,948] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-03 17:14:22,948] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-03 17:14:22,948] [INFO] [Engine]: Assigning initial task to agent1: Find new about the latest trends in AI. -[2024-11-03 17:14:22,948] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-03 17:14:28,572] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 17:14:28,573] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 17:14:28,573] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 17:14:28,573] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-03 17:14:52,009] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 17:14:52,009] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:14:52,009] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:14:52,009] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:14:52,009] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:14:52,009] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:14:52,009] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:14:52,009] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:14:52,009] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-03 17:14:52,010] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:14:52,010] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:14:52,010] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:14:52,010] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:14:52,010] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:14:52,010] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-03 17:14:52,010] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-03 17:14:52,010] [INFO] [Engine]: Assigning initial task to agent1: Find new about the latest trends in AI. -[2024-11-03 17:14:52,010] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-03 17:14:54,914] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-03 17:14:54,915] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-03 17:14:54,916] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_yAliqFIfA57HbNsKiIRLXhFL', type='function')], function_call=None) -[2024-11-03 17:14:54,916] [INFO] [Engine]: Assigning initial task to agent2: Find new about the latest trends in AI. -[2024-11-03 17:14:54,916] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Find new about the latest trends in AI.'. 
-[2024-11-03 17:14:57,080] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-03 17:14:57,081] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-03 17:14:57,082] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_TTuWDMVL8EEBUjfArFuUhmgX', type='function')], function_call=None) -[2024-11-03 17:14:57,082] [INFO] [Engine]: Assigning initial task to agent3: Find new about the latest trends in AI. -[2024-11-03 17:14:57,082] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Find new about the latest trends in AI.'. -[2024-11-03 17:14:59,400] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-03 17:14:59,401] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-03 17:14:59,404] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_uG5uc4NT38q8HUZlRZKA9uav', type='function')], function_call=None) -[2024-11-03 17:14:59,404] [INFO] [Engine]: Assigning initial task to agent4: Find new about the latest trends in AI. -[2024-11-03 17:14:59,404] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Find new about the latest trends in AI.'. -[2024-11-03 17:15:01,605] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-03 17:15:01,606] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-03 17:15:01,606] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_q2ISaM5URtlvD3lK4vdUK76E', type='function')], function_call=None) -[2024-11-03 17:15:01,606] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_yAliqFIfA57HbNsKiIRLXhFL', type='function')], function_call=None) -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_TTuWDMVL8EEBUjfArFuUhmgX', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_uG5uc4NT38q8HUZlRZKA9uav', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_q2ISaM5URtlvD3lK4vdUK76E', type='function')], function_call=None) - -[2024-11-03 17:15:01,607] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_yAliqFIfA57HbNsKiIRLXhFL', type='function')], function_call=None) -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_TTuWDMVL8EEBUjfArFuUhmgX', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_uG5uc4NT38q8HUZlRZKA9uav', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_q2ISaM5URtlvD3lK4vdUK76E', type='function')], function_call=None) - -[2024-11-03 17:15:01,607] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_yAliqFIfA57HbNsKiIRLXhFL', type='function')], function_call=None) -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_TTuWDMVL8EEBUjfArFuUhmgX', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_uG5uc4NT38q8HUZlRZKA9uav', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_q2ISaM5URtlvD3lK4vdUK76E', type='function')], function_call=None) - -[2024-11-03 17:15:01,607] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 17:15:01,608] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-03 17:15:33,818] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: Function marble.llms.model_prompting.model_prompting() return "None" violates type hint list[litellm.types.utils.Message], as  "None" not instance of list. -[2024-11-03 17:15:33,819] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. 
-[2024-11-03 17:15:51,324] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 17:15:51,326] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-11-03 17:15:51,326] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-11-03 17:15:51,326] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-03 17:22:57,597] [DEBUG] [Engine]: Environment 'Base' initialized. -[2024-11-03 17:22:57,597] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:22:57,597] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:22:57,597] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:22:57,597] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:22:57,597] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:22:57,597] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:22:57,597] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:22:57,597] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:22:57,598] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:22:57,598] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:22:57,598] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:22:57,598] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:22:57,598] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:22:57,598] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-03 17:22:57,598] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-03 17:22:57,598] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-03 17:22:57,598] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. 
-[2024-11-03 17:23:02,784] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 17:23:02,785] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 17:23:02,785] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 17:23:02,785] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-03 17:24:19,508] [DEBUG] [Engine]: Environment 'Base' initialized. -[2024-11-03 17:24:19,509] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:24:19,516] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:24:19,516] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:24:19,516] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:24:19,517] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:24:19,517] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:24:19,517] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:24:19,517] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:24:19,517] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:24:19,517] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:24:19,517] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:24:19,517] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:24:19,517] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:24:19,517] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-03 17:24:19,517] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-03 17:24:19,517] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-03 17:24:19,517] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. 
-[2024-11-03 17:24:24,551] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 17:24:24,552] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 17:24:24,553] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 17:24:24,553] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-03 17:26:11,606] [DEBUG] [Engine]: Environment 'Base' initialized. -[2024-11-03 17:26:11,607] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:26:11,607] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:26:11,607] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:26:11,607] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:26:11,607] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:26:11,607] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:26:11,607] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:26:11,607] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:26:11,607] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 17:26:11,607] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:26:11,607] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:26:11,607] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:26:11,607] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:26:11,608] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-03 17:26:11,608] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-03 17:26:11,608] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-03 17:26:11,608] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. 
-[2024-11-03 17:26:16,102] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can analyze market trends and make investment decisions, optimize trading strategies, and predict financial market fluctuations.\n\n3. Retail: AI can help retailers improve customer experience by personalizing product recommendations, optimizing pricing strategies, and predicting consumer behavior. AI-powered chatbots can also provide customer support and assist in online shopping.\n\n4. Manufacturing: AI can be used in manufacturing to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots and drones can automate repetitive tasks and increase productivity in factories.\n\n5. Transportation: AI can be used in the transportation industry to optimize route planning, reduce traffic congestion, and improve safety. AI-powered autonomous vehicles can revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Agriculture: AI can help farmers increase crop yields, optimize irrigation and fertilization practices, and monitor plant health. AI-powered drones and sensors can collect data on soil conditions, weather patterns, and crop growth to make informed decisions.\n\n7. 
Education: AI can personalize learning experiences for students by adapting to their individual needs and learning styles. AI-powered tools can provide real-time feedback, assess student performance, and recommend personalized study materials.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, reducing costs, and improving decision-making processes. However, it is important to address ethical and privacy concerns related to the use of AI to ensure that its benefits are maximized while minimizing potential risks.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 17:26:16,103] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can analyze market trends and make investment decisions, optimize trading strategies, and predict financial market fluctuations.\n\n3. Retail: AI can help retailers improve customer experience by personalizing product recommendations, optimizing pricing strategies, and predicting consumer behavior. AI-powered chatbots can also provide customer support and assist in online shopping.\n\n4. Manufacturing: AI can be used in manufacturing to optimize production processes, predict equipment failures, and improve quality control. 
AI-powered robots and drones can automate repetitive tasks and increase productivity in factories.\n\n5. Transportation: AI can be used in the transportation industry to optimize route planning, reduce traffic congestion, and improve safety. AI-powered autonomous vehicles can revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Agriculture: AI can help farmers increase crop yields, optimize irrigation and fertilization practices, and monitor plant health. AI-powered drones and sensors can collect data on soil conditions, weather patterns, and crop growth to make informed decisions.\n\n7. Education: AI can personalize learning experiences for students by adapting to their individual needs and learning styles. AI-powered tools can provide real-time feedback, assess student performance, and recommend personalized study materials.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, reducing costs, and improving decision-making processes. However, it is important to address ethical and privacy concerns related to the use of AI to ensure that its benefits are maximized while minimizing potential risks.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:26:16,103] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-03 17:26:16,103] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:26:19,582] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. 
It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and provide real-time navigation assistance. AI-powered autonomous vehicles can revolutionize the transportation industry.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to address ethical and privacy concerns associated with the use of AI to ensure its responsible deployment.', role='assistant', tool_calls=None, function_call=None)'. 
-[2024-11-03 17:26:19,583] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and provide real-time navigation assistance. AI-powered autonomous vehicles can revolutionize the transportation industry.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. 
AI-powered tutoring systems can help students learn at their own pace.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to address ethical and privacy concerns associated with the use of AI to ensure its responsible deployment.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:26:19,583] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-03 17:26:19,583] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:26:23,569] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. 
Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed to revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, the potential impact of AI on various industries is vast and transformative. While AI has the potential to bring about significant benefits in terms of efficiency, productivity, and innovation, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be carefully addressed. It is important for businesses and policymakers to work together to harness the power of AI in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 17:26:23,570] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. 
AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed to revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, the potential impact of AI on various industries is vast and transformative. While AI has the potential to bring about significant benefits in terms of efficiency, productivity, and innovation, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be carefully addressed. 
It is important for businesses and policymakers to work together to harness the power of AI in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:26:23,570] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-03 17:26:23,570] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:26:27,350] [INFO] [BaseAgent]: Agent 'agent4' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and improving the overall quality of healthcare services.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment, leading to cost savings and improved accuracy.\n\n3. Retail: AI can be used to analyze customer data, predict consumer behavior, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and enhance the overall shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve supply chain management. It can also enable the development of smart factories that are more efficient and responsive to changing market demands.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety in transportation systems. 
It can also enable the development of autonomous vehicles that are more efficient and reliable than traditional vehicles.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also enable the development of virtual tutors and online learning platforms that are more engaging and effective.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be addressed to ensure that the benefits of AI are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 17:26:27,351] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and improving the overall quality of healthcare services.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment, leading to cost savings and improved accuracy.\n\n3. Retail: AI can be used to analyze customer data, predict consumer behavior, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and enhance the overall shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve supply chain management. 
It can also enable the development of smart factories that are more efficient and responsive to changing market demands.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety in transportation systems. It can also enable the development of autonomous vehicles that are more efficient and reliable than traditional vehicles.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also enable the development of virtual tutors and online learning platforms that are more engaging and effective.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be addressed to ensure that the benefits of AI are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:26:27,351] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. 
AI algorithms can analyze market trends and make investment decisions, optimize trading strategies, and predict financial market fluctuations.\n\n3. Retail: AI can help retailers improve customer experience by personalizing product recommendations, optimizing pricing strategies, and predicting consumer behavior. AI-powered chatbots can also provide customer support and assist in online shopping.\n\n4. Manufacturing: AI can be used in manufacturing to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots and drones can automate repetitive tasks and increase productivity in factories.\n\n5. Transportation: AI can be used in the transportation industry to optimize route planning, reduce traffic congestion, and improve safety. AI-powered autonomous vehicles can revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Agriculture: AI can help farmers increase crop yields, optimize irrigation and fertilization practices, and monitor plant health. AI-powered drones and sensors can collect data on soil conditions, weather patterns, and crop growth to make informed decisions.\n\n7. Education: AI can personalize learning experiences for students by adapting to their individual needs and learning styles. AI-powered tools can provide real-time feedback, assess student performance, and recommend personalized study materials.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, reducing costs, and improving decision-making processes. However, it is important to address ethical and privacy concerns related to the use of AI to ensure that its benefits are maximized while minimizing potential risks.', role='assistant', tool_calls=None, function_call=None) -- agent2: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. 
Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and provide real-time navigation assistance. AI-powered autonomous vehicles can revolutionize the transportation industry.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. 
However, it is important to address ethical and privacy concerns associated with the use of AI to ensure its responsible deployment.', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed to revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. 
Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, the potential impact of AI on various industries is vast and transformative. While AI has the potential to bring about significant benefits in terms of efficiency, productivity, and innovation, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be carefully addressed. It is important for businesses and policymakers to work together to harness the power of AI in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -- agent4: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and improving the overall quality of healthcare services.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment, leading to cost savings and improved accuracy.\n\n3. Retail: AI can be used to analyze customer data, predict consumer behavior, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and enhance the overall shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve supply chain management. 
It can also enable the development of smart factories that are more efficient and responsive to changing market demands.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety in transportation systems. It can also enable the development of autonomous vehicles that are more efficient and reliable than traditional vehicles.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also enable the development of virtual tutors and online learning platforms that are more engaging and effective.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be addressed to ensure that the benefits of AI are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 17:26:27,352] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- agent1: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. 
AI algorithms can analyze market trends and make investment decisions, optimize trading strategies, and predict financial market fluctuations.\n\n3. Retail: AI can help retailers improve customer experience by personalizing product recommendations, optimizing pricing strategies, and predicting consumer behavior. AI-powered chatbots can also provide customer support and assist in online shopping.\n\n4. Manufacturing: AI can be used in manufacturing to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots and drones can automate repetitive tasks and increase productivity in factories.\n\n5. Transportation: AI can be used in the transportation industry to optimize route planning, reduce traffic congestion, and improve safety. AI-powered autonomous vehicles can revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Agriculture: AI can help farmers increase crop yields, optimize irrigation and fertilization practices, and monitor plant health. AI-powered drones and sensors can collect data on soil conditions, weather patterns, and crop growth to make informed decisions.\n\n7. Education: AI can personalize learning experiences for students by adapting to their individual needs and learning styles. AI-powered tools can provide real-time feedback, assess student performance, and recommend personalized study materials.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, reducing costs, and improving decision-making processes. However, it is important to address ethical and privacy concerns related to the use of AI to ensure that its benefits are maximized while minimizing potential risks.', role='assistant', tool_calls=None, function_call=None) -- agent2: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. 
Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and provide real-time navigation assistance. AI-powered autonomous vehicles can revolutionize the transportation industry.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. 
However, it is important to address ethical and privacy concerns associated with the use of AI to ensure its responsible deployment.', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed to revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. 
Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, the potential impact of AI on various industries is vast and transformative. While AI has the potential to bring about significant benefits in terms of efficiency, productivity, and innovation, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be carefully addressed. It is important for businesses and policymakers to work together to harness the power of AI in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -- agent4: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and improving the overall quality of healthcare services.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment, leading to cost savings and improved accuracy.\n\n3. Retail: AI can be used to analyze customer data, predict consumer behavior, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and enhance the overall shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve supply chain management. 
It can also enable the development of smart factories that are more efficient and responsive to changing market demands.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety in transportation systems. It can also enable the development of autonomous vehicles that are more efficient and reliable than traditional vehicles.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also enable the development of virtual tutors and online learning platforms that are more engaging and effective.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be addressed to ensure that the benefits of AI are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 17:26:27,353] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- agent1: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. 
AI algorithms can analyze market trends and make investment decisions, optimize trading strategies, and predict financial market fluctuations.\n\n3. Retail: AI can help retailers improve customer experience by personalizing product recommendations, optimizing pricing strategies, and predicting consumer behavior. AI-powered chatbots can also provide customer support and assist in online shopping.\n\n4. Manufacturing: AI can be used in manufacturing to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots and drones can automate repetitive tasks and increase productivity in factories.\n\n5. Transportation: AI can be used in the transportation industry to optimize route planning, reduce traffic congestion, and improve safety. AI-powered autonomous vehicles can revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. Agriculture: AI can help farmers increase crop yields, optimize irrigation and fertilization practices, and monitor plant health. AI-powered drones and sensors can collect data on soil conditions, weather patterns, and crop growth to make informed decisions.\n\n7. Education: AI can personalize learning experiences for students by adapting to their individual needs and learning styles. AI-powered tools can provide real-time feedback, assess student performance, and recommend personalized study materials.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, reducing costs, and improving decision-making processes. However, it is important to address ethical and privacy concerns related to the use of AI to ensure that its benefits are maximized while minimizing potential risks.', role='assistant', tool_calls=None, function_call=None) -- agent2: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. 
Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and provide real-time navigation assistance. AI-powered autonomous vehicles can revolutionize the transportation industry.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. 
However, it is important to address ethical and privacy concerns associated with the use of AI to ensure its responsible deployment.', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed to revolutionize the way people and goods are transported, leading to reduced accidents and emissions.\n\n6. 
Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, the potential impact of AI on various industries is vast and transformative. While AI has the potential to bring about significant benefits in terms of efficiency, productivity, and innovation, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be carefully addressed. It is important for businesses and policymakers to work together to harness the power of AI in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -- agent4: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and improving the overall quality of healthcare services.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment, leading to cost savings and improved accuracy.\n\n3. Retail: AI can be used to analyze customer data, predict consumer behavior, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and enhance the overall shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve supply chain management. 
It can also enable the development of smart factories that are more efficient and responsive to changing market demands.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety in transportation systems. It can also enable the development of autonomous vehicles that are more efficient and reliable than traditional vehicles.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also enable the development of virtual tutors and online learning platforms that are more engaging and effective.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it also raises concerns about job displacement, data privacy, and ethical considerations that need to be addressed to ensure that the benefits of AI are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 17:26:27,354] [ERROR] [Engine]: An error occurred during graph-based coordination. 
-Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 200, in graph_coordinate - self.evaluator.update(self.environment, self.agents) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 17:26:27,361] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 17:26:27,362] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 17:26:27,362] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 17:26:27,365] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-03 17:27:41,911] [DEBUG] [Engine]: Environment 'Base' initialized. -[2024-11-03 17:27:41,911] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 17:27:41,912] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 17:27:41,912] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 17:27:41,912] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 17:27:41,912] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 17:27:41,912] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 17:27:41,912] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 17:27:41,912] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 17:27:41,912] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 17:27:41,912] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 17:27:41,912] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 17:27:41,912] [INFO] [Engine]: Engine initialized. -[2024-11-03 17:27:41,912] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 17:27:41,912] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-03 17:27:41,912] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-03 17:27:41,912] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-03 17:27:41,912] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:27:45,985] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. 
Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and enhance the overall transportation experience. AI-powered autonomous vehicles can revolutionize the way people and goods are transported.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace and improve their academic performance.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it also raises concerns about job displacement, data privacy, and ethical implications. It is important for businesses and policymakers to carefully consider these implications and develop strategies to harness the full potential of AI while mitigating its risks.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 17:27:45,986] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. 
AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and enhance the overall transportation experience. AI-powered autonomous vehicles can revolutionize the way people and goods are transported.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace and improve their academic performance.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it also raises concerns about job displacement, data privacy, and ethical implications. 
It is important for businesses and policymakers to carefully consider these implications and develop strategies to harness the full potential of AI while mitigating its risks.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:27:45,986] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-03 17:27:45,986] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:27:49,430] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. 
Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for precision agriculture.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 17:27:49,431] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. 
AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for precision agriculture.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:27:49,431] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-03 17:27:49,432] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:27:52,459] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. 
AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also provide customer support and answer queries in real-time, improving customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Agriculture: AI can help farmers optimize crop yields by analyzing weather patterns, soil conditions, and crop health data. AI-powered drones and robots can also be used to monitor fields, apply fertilizers, and harvest crops, leading to increased efficiency and sustainability in agriculture.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it is important for businesses to carefully consider the ethical and societal implications of AI adoption to ensure that its benefits are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None)'. 
-[2024-11-03 17:27:52,460] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also provide customer support and answer queries in real-time, improving customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Agriculture: AI can help farmers optimize crop yields by analyzing weather patterns, soil conditions, and crop health data. 
AI-powered drones and robots can also be used to monitor fields, apply fertilizers, and harvest crops, leading to increased efficiency and sustainability in agriculture.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it is important for businesses to carefully consider the ethical and societal implications of AI adoption to ensure that its benefits are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:27:52,461] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-03 17:27:52,461] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 17:27:56,651] [INFO] [BaseAgent]: Agent 'agent4' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. 
AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities that were previously not possible. However, it is important for businesses to carefully consider the ethical implications of AI and ensure that it is used responsibly to benefit society as a whole.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 17:27:56,657] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. 
Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities that were previously not possible. 
However, it is important for businesses to carefully consider the ethical implications of AI and ensure that it is used responsibly to benefit society as a whole.', role='assistant', tool_calls=None, function_call=None) -[2024-11-03 17:27:56,661] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and enhance the overall transportation experience. 
AI-powered autonomous vehicles can revolutionize the way people and goods are transported.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace and improve their academic performance.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it also raises concerns about job displacement, data privacy, and ethical implications. It is important for businesses and policymakers to carefully consider these implications and develop strategies to harness the full potential of AI while mitigating its risks.', role='assistant', tool_calls=None, function_call=None) -- agent2: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. 
Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for precision agriculture.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. 
AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also provide customer support and answer queries in real-time, improving customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Agriculture: AI can help farmers optimize crop yields by analyzing weather patterns, soil conditions, and crop health data. AI-powered drones and robots can also be used to monitor fields, apply fertilizers, and harvest crops, leading to increased efficiency and sustainability in agriculture.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it is important for businesses to carefully consider the ethical and societal implications of AI adoption to ensure that its benefits are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -- agent4: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. 
Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. 
AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities that were previously not possible. However, it is important for businesses to carefully consider the ethical implications of AI and ensure that it is used responsibly to benefit society as a whole.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 17:27:56,661] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- agent1: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. 
AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and enhance the overall transportation experience. AI-powered autonomous vehicles can revolutionize the way people and goods are transported.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace and improve their academic performance.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it also raises concerns about job displacement, data privacy, and ethical implications. It is important for businesses and policymakers to carefully consider these implications and develop strategies to harness the full potential of AI while mitigating its risks.', role='assistant', tool_calls=None, function_call=None) -- agent2: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. 
Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for precision agriculture.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. 
AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also provide customer support and answer queries in real-time, improving customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Agriculture: AI can help farmers optimize crop yields by analyzing weather patterns, soil conditions, and crop health data. AI-powered drones and robots can also be used to monitor fields, apply fertilizers, and harvest crops, leading to increased efficiency and sustainability in agriculture.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. 
However, it is important for businesses to carefully consider the ethical and societal implications of AI adoption to ensure that its benefits are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -- agent4: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. 
AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities that were previously not possible. However, it is important for businesses to carefully consider the ethical implications of AI and ensure that it is used responsibly to benefit society as a whole.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 17:27:56,662] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- agent1: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes. AI-powered robots can assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, and make investment decisions. It can also automate routine tasks such as customer service and risk assessment. AI-powered chatbots can provide personalized financial advice to customers.\n\n3. Retail: AI can be used to analyze customer data, predict trends, and personalize marketing campaigns. 
It can also automate inventory management, optimize pricing strategies, and improve customer service. AI-powered virtual assistants can help customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also automate routine tasks such as inventory management and supply chain optimization. AI-powered robots can assist in assembly lines and perform complex tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety. It can also automate vehicle maintenance, monitor driver behavior, and enhance the overall transportation experience. AI-powered autonomous vehicles can revolutionize the way people and goods are transported.\n\n6. Education: AI can be used to personalize learning experiences, assess student performance, and provide feedback. It can also automate administrative tasks such as grading and scheduling. AI-powered tutoring systems can help students learn at their own pace and improve their academic performance.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it also raises concerns about job displacement, data privacy, and ethical implications. It is important for businesses and policymakers to carefully consider these implications and develop strategies to harness the full potential of AI while mitigating its risks.', role='assistant', tool_calls=None, function_call=None) -- agent2: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. 
Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for precision agriculture.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. 
However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also provide customer support and answer queries in real-time, improving customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. 
Agriculture: AI can help farmers optimize crop yields by analyzing weather patterns, soil conditions, and crop health data. AI-powered drones and robots can also be used to monitor fields, apply fertilizers, and harvest crops, leading to increased efficiency and sustainability in agriculture.\n\nOverall, AI has the potential to transform various industries by enabling new capabilities, improving efficiency, and driving innovation. However, it is important for businesses to carefully consider the ethical and societal implications of AI adoption to ensure that its benefits are realized in a responsible and sustainable manner.', role='assistant', tool_calls=None, function_call=None) -- agent4: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze large amounts of medical data to identify patterns and trends that can help in diagnosing diseases, developing treatment plans, and predicting patient outcomes. AI-powered tools can also assist in drug discovery, personalized medicine, and remote patient monitoring.\n\n2. Finance: AI can be used in the finance industry to automate routine tasks such as fraud detection, risk assessment, and customer service. AI algorithms can also analyze market trends and make investment decisions in real-time, leading to better financial outcomes for businesses and individuals.\n\n3. Manufacturing: AI can optimize production processes by predicting equipment failures, scheduling maintenance, and improving supply chain management. AI-powered robots can also be used to automate repetitive tasks on the factory floor, leading to increased productivity and cost savings.\n\n4. 
Retail: AI can personalize the shopping experience for customers by analyzing their preferences and recommending products that are likely to be of interest to them. AI-powered chatbots can also assist customers with their inquiries and provide real-time support, leading to improved customer satisfaction and loyalty.\n\n5. Transportation: AI can be used to optimize traffic flow, reduce congestion, and improve safety on the roads. AI-powered autonomous vehicles are also being developed, which have the potential to revolutionize the way people and goods are transported in the future.\n\n6. Education: AI can personalize learning experiences for students by adapting the curriculum to their individual needs and learning styles. AI-powered tools can also assist teachers in grading assignments, providing feedback, and identifying areas where students may need additional support.\n\nOverall, AI has the potential to transform various industries by increasing efficiency, improving decision-making processes, and enabling new capabilities that were previously not possible. However, it is important for businesses to carefully consider the ethical implications of AI and ensure that it is used responsibly to benefit society as a whole.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 17:27:56,663] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 17:27:56,663] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-03 17:27:56,663] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: Object of type Message is not JSON serializable -[2024-11-03 17:27:56,663] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-03 17:27:56,663] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: Object of type Message is not JSON serializable -[2024-11-03 17:27:56,663] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. 
-[2024-11-03 17:27:56,663] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: Object of type Message is not JSON serializable -[2024-11-03 17:27:56,663] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-03 17:27:56,663] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: Object of type Message is not JSON serializable -[2024-11-03 17:27:56,663] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 17:27:56,663] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: - -[2024-11-03 17:27:57,979] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-03 17:27:57,980] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-03 17:27:57,980] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-03 17:27:57,981] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 17:27:57,981] [INFO] [Evaluator]: Total Token Consumption: 4730 -[2024-11-03 17:27:57,981] [INFO] [Evaluator]: Average Tokens per Iteration: 2365.0 -[2024-11-03 17:27:57,981] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-03 19:21:09,622] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:21:09,622] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:21:09,622] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:21:09,622] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:21:09,622] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:21:09,622] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:21:09,622] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:21:09,622] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:21:09,622] [INFO] [BaseAgent]: Agent 'agent5' initialized. 
-[2024-11-03 19:21:09,622] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:21:09,622] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 19:21:09,622] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 19:21:09,622] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 19:21:09,622] [INFO] [Engine]: Engine initialized. -[2024-11-03 19:21:09,622] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 19:21:09,622] [INFO] [Engine]: Running in tree-based coordination mode. -[2024-11-03 19:21:09,622] [INFO] [Engine]: Starting tree-based coordination. -[2024-11-03 19:21:09,622] [DEBUG] [AgentGraph]: Root agents: ['agent1', 'agent2', 'agent3', 'agent4', 'agent5'] -[2024-11-03 19:21:09,622] [ERROR] [AgentGraph]: Multiple root agents found in the graph. -[2024-11-03 19:21:09,622] [ERROR] [Engine]: No root agent found in the tree. -[2024-11-03 19:21:09,622] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 19:21:09,622] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 19:21:09,622] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 19:21:09,623] [INFO] [Engine]: Tree-based coordination simulation completed. -[2024-11-03 19:35:57,103] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:35:57,103] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:35:57,103] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:35:57,103] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:35:57,103] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:35:57,103] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:35:57,103] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:35:57,103] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-03 19:35:57,103] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:35:57,103] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:35:57,103] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 19:35:57,104] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 19:35:57,104] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 19:35:57,104] [INFO] [Engine]: Engine initialized. -[2024-11-03 19:35:57,104] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 19:35:57,104] [INFO] [Engine]: Running in tree-based coordination mode. -[2024-11-03 19:35:57,104] [INFO] [Engine]: Starting tree-based coordination. -[2024-11-03 19:35:57,104] [DEBUG] [AgentGraph]: Root agents: ['agent1', 'agent2', 'agent3', 'agent4', 'agent5'] -[2024-11-03 19:35:57,104] [ERROR] [AgentGraph]: Multiple root agents found in the graph. -[2024-11-03 19:35:57,104] [ERROR] [Engine]: No root agent found in the tree. -[2024-11-03 19:35:57,104] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 19:35:57,104] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 19:35:57,104] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 19:35:57,104] [INFO] [Engine]: Tree-based coordination simulation completed. -[2024-11-03 19:36:24,278] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:36:24,278] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:36:24,278] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:36:24,278] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:36:24,278] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:36:24,278] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:36:24,278] [INFO] [BaseAgent]: Agent 'agent4' initialized. 
-[2024-11-03 19:36:24,278] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:36:24,278] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:36:24,278] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:36:24,278] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 19:36:24,278] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 19:36:24,278] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 19:36:24,278] [INFO] [Engine]: Engine initialized. -[2024-11-03 19:36:24,278] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 19:36:24,279] [INFO] [Engine]: Running in tree-based coordination mode. -[2024-11-03 19:36:24,279] [INFO] [Engine]: Starting tree-based coordination. -[2024-11-03 19:36:24,279] [DEBUG] [AgentGraph]: Root agents: ['agent1', 'agent2', 'agent3', 'agent4', 'agent5'] -[2024-11-03 19:36:24,279] [ERROR] [AgentGraph]: Multiple root agents found in the graph. -[2024-11-03 19:36:24,279] [ERROR] [Engine]: No root agent found in the tree. -[2024-11-03 19:36:24,279] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 19:36:24,279] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 19:36:24,279] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 19:36:24,279] [INFO] [Engine]: Tree-based coordination simulation completed. -[2024-11-03 19:38:07,976] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:38:07,976] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:38:07,976] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:38:07,976] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:38:07,976] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:38:07,976] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. 
-[2024-11-03 19:38:07,976] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:38:07,976] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:38:07,976] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:38:07,976] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:43:10,946] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:43:10,947] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:43:10,947] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:43:10,947] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:43:10,947] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:43:10,947] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:43:10,947] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:43:10,947] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:43:10,947] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:43:10,947] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:43:45,064] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:43:45,064] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:43:45,064] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:43:45,064] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:43:45,064] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:43:45,064] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:43:45,064] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:43:45,064] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:43:45,064] [INFO] [BaseAgent]: Agent 'agent5' initialized. 
-[2024-11-03 19:43:45,064] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:43:45,064] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'tree'. -[2024-11-03 19:44:44,849] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:44:44,850] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:44:44,850] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:44:44,850] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:44:44,850] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:44:44,850] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:44:44,850] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:44:44,850] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:44:44,850] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:44:44,850] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:44:44,850] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'tree'. -[2024-11-03 19:45:36,396] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:45:36,397] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:45:36,397] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:45:36,397] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:45:36,397] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:45:36,397] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:45:36,397] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:45:36,397] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:45:36,397] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:45:36,397] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. 
-[2024-11-03 19:45:36,398] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'tree'. -[2024-11-03 19:47:08,753] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 19:47:08,753] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 19:47:08,754] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 19:47:08,754] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 19:47:08,754] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 19:47:08,754] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 19:47:08,754] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 19:47:08,754] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 19:47:08,754] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-03 19:47:08,754] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-03 19:47:08,754] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'tree'. -[2024-11-03 19:47:08,754] [INFO] [AgentGraph]: Relationship added: agent1 --[parent]--> agent2 -[2024-11-03 19:47:08,754] [INFO] [AgentGraph]: Relationship added: agent1 --[parent]--> agent3 -[2024-11-03 19:47:08,754] [INFO] [AgentGraph]: Relationship added: agent2 --[parent]--> agent4 -[2024-11-03 19:47:08,754] [INFO] [AgentGraph]: Relationship added: agent3 --[parent]--> agent5 -[2024-11-03 19:47:08,754] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 19:47:08,754] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 19:47:08,754] [INFO] [Engine]: Engine initialized. -[2024-11-03 19:47:08,754] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 19:47:08,754] [INFO] [Engine]: Running in tree-based coordination mode. -[2024-11-03 19:47:08,754] [INFO] [Engine]: Starting tree-based coordination. 
-[2024-11-03 19:47:08,754] [DEBUG] [AgentGraph]: Root agents: ['agent1'] -[2024-11-03 19:47:08,754] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 19:47:08,754] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-03 19:47:08,754] [INFO] [BaseAgent]: Agent 'agent1' is planning tasks for children. -[2024-11-03 19:47:10,135] [ERROR] [BaseAgent]: Failed to parse tasks for children: Extra data: line 1 column 9 (char 8) -[2024-11-03 19:47:10,136] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 19:47:13,782] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. 
AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for crop monitoring and spraying pesticides.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 19:47:13,784] [INFO] [BaseAgent]: Agent 'agent1' is summarizing results. -[2024-11-03 19:47:13,784] [INFO] [BaseAgent]: Agent 'agent1' is processing children's results. -[2024-11-03 19:47:13,784] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. 
Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for crop monitoring and spraying pesticides.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 19:47:13,784] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. 
AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for crop monitoring and spraying pesticides.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 19:47:13,784] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. 
-Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. 
AI-powered drones can also be used for crop monitoring and spraying pesticides.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 19:47:14,295] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-03 19:47:14,295] [INFO] [Engine]: Starting iteration 2 -[2024-11-03 19:47:14,296] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-03 19:47:14,296] [INFO] [BaseAgent]: Agent 'agent1' is planning tasks for children. -[2024-11-03 19:47:15,316] [ERROR] [BaseAgent]: Failed to parse tasks for children: Extra data: line 1 column 9 (char 8) -[2024-11-03 19:47:15,317] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-03 19:47:19,105] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, automate trading, and provide personalized financial advice. It can also help in risk management and compliance.\n\n3. Retail: AI can be used to analyze customer data, personalize marketing campaigns, optimize pricing strategies, and improve inventory management. 
It can also enhance the customer shopping experience through chatbots and virtual assistants.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also enable the development of smart factories that are more efficient and flexible.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also enable the development of autonomous vehicles that are more efficient and reliable.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also help in developing adaptive learning systems that cater to individual student needs.\n\n7. Agriculture: AI can be used to optimize crop management, monitor soil conditions, and predict crop yields. It can also enable the development of precision agriculture techniques that are more sustainable and productive.\n\nOverall, AI has the potential to transform various industries by enabling them to become more efficient, innovative, and competitive. However, it also raises concerns about job displacement, data privacy, and ethical implications that need to be addressed.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-03 19:47:19,106] [INFO] [BaseAgent]: Agent 'agent1' is summarizing results. -[2024-11-03 19:47:19,106] [INFO] [BaseAgent]: Agent 'agent1' is processing children's results. -[2024-11-03 19:47:19,106] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. 
It can also help in drug discovery, patient monitoring, and predicting patient outcomes.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, automate trading, and provide personalized financial advice. It can also help in risk management and compliance.\n\n3. Retail: AI can be used to analyze customer data, personalize marketing campaigns, optimize pricing strategies, and improve inventory management. It can also enhance the customer shopping experience through chatbots and virtual assistants.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also enable the development of smart factories that are more efficient and flexible.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also enable the development of autonomous vehicles that are more efficient and reliable.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also help in developing adaptive learning systems that cater to individual student needs.\n\n7. Agriculture: AI can be used to optimize crop management, monitor soil conditions, and predict crop yields. It can also enable the development of precision agriculture techniques that are more sustainable and productive.\n\nOverall, AI has the potential to transform various industries by enabling them to become more efficient, innovative, and competitive. 
However, it also raises concerns about job displacement, data privacy, and ethical implications that need to be addressed.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 19:47:19,107] [INFO] [Engine]: Iteration 2 Summary: -Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, automate trading, and provide personalized financial advice. It can also help in risk management and compliance.\n\n3. Retail: AI can be used to analyze customer data, personalize marketing campaigns, optimize pricing strategies, and improve inventory management. It can also enhance the customer shopping experience through chatbots and virtual assistants.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. It can also enable the development of smart factories that are more efficient and flexible.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also enable the development of autonomous vehicles that are more efficient and reliable.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also help in developing adaptive learning systems that cater to individual student needs.\n\n7. Agriculture: AI can be used to optimize crop management, monitor soil conditions, and predict crop yields. 
It can also enable the development of precision agriculture techniques that are more sustainable and productive.\n\nOverall, AI has the potential to transform various industries by enabling them to become more efficient, innovative, and competitive. However, it also raises concerns about job displacement, data privacy, and ethical implications that need to be addressed.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 19:47:19,107] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Here are some examples of how AI could impact different industries:\n\n1. Healthcare: AI can be used to analyze medical data and images to assist in diagnosing diseases, predicting patient outcomes, and personalizing treatment plans. AI-powered robots can also assist in surgeries and provide care to patients.\n\n2. Finance: AI can be used to analyze financial data and predict market trends, automate trading, detect fraud, and provide personalized financial advice to customers. AI-powered chatbots can also assist customers with their banking needs.\n\n3. Retail: AI can be used to analyze customer data and behavior to personalize marketing campaigns, optimize pricing strategies, and improve inventory management. AI-powered chatbots can also assist customers with their shopping experience.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. AI-powered robots can also assist in assembly lines and perform repetitive tasks.\n\n5. Transportation: AI can be used to optimize route planning, predict traffic patterns, and improve safety in autonomous vehicles. 
AI-powered drones can also be used for delivery services.\n\n6. Education: AI can be used to personalize learning experiences for students, provide feedback on assignments, and assist teachers in creating lesson plans. AI-powered chatbots can also provide support to students outside of the classroom.\n\n7. Agriculture: AI can be used to analyze soil and weather data to optimize crop yields, monitor plant health, and automate farming tasks. AI-powered drones can also be used for crop monitoring and spraying pesticides.\n\nOverall, AI has the potential to transform various industries by increasing productivity, improving decision-making processes, and enhancing customer experiences. However, it is important to consider the ethical implications of AI and ensure that it is used responsibly and ethically.', role='assistant', tool_calls=None, function_call=None) - -Agents' Results Summary: -- root_agent: -Own result: Message(content='Artificial Intelligence (AI) has the potential to revolutionize various industries by increasing efficiency, reducing costs, and improving decision-making processes. Some of the key industries that are likely to be impacted by AI include:\n\n1. Healthcare: AI can be used to analyze medical data, diagnose diseases, and develop personalized treatment plans. It can also help in drug discovery, patient monitoring, and predicting patient outcomes.\n\n2. Finance: AI can be used to analyze financial data, detect fraud, automate trading, and provide personalized financial advice. It can also help in risk management and compliance.\n\n3. Retail: AI can be used to analyze customer data, personalize marketing campaigns, optimize pricing strategies, and improve inventory management. It can also enhance the customer shopping experience through chatbots and virtual assistants.\n\n4. Manufacturing: AI can be used to optimize production processes, predict equipment failures, and improve quality control. 
It can also enable the development of smart factories that are more efficient and flexible.\n\n5. Transportation: AI can be used to optimize route planning, reduce traffic congestion, and improve safety. It can also enable the development of autonomous vehicles that are more efficient and reliable.\n\n6. Education: AI can be used to personalize learning experiences, provide feedback to students, and automate administrative tasks. It can also help in developing adaptive learning systems that cater to individual student needs.\n\n7. Agriculture: AI can be used to optimize crop management, monitor soil conditions, and predict crop yields. It can also enable the development of precision agriculture techniques that are more sustainable and productive.\n\nOverall, AI has the potential to transform various industries by enabling them to become more efficient, innovative, and competitive. However, it also raises concerns about job displacement, data privacy, and ethical implications that need to be addressed.', role='assistant', tool_calls=None, function_call=None) - -[2024-11-03 19:47:19,615] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-03 19:47:19,616] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-03 19:47:19,616] [INFO] [Engine]: Tree-based coordination simulation completed. -[2024-11-03 19:47:19,616] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 19:47:19,616] [INFO] [Evaluator]: Total Token Consumption: 1593 -[2024-11-03 19:47:19,616] [INFO] [Evaluator]: Average Tokens per Iteration: 796.5 -[2024-11-03 19:47:19,617] [INFO] [Engine]: Tree-based coordination simulation completed. -[2024-11-07 16:32:22,771] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:32:22,771] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:32:22,771] [INFO] [BaseAgent]: Agent 'agent2' initialized. 
-[2024-11-07 16:32:22,772] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:32:22,772] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:32:22,772] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:32:22,772] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:32:22,772] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 16:32:22,772] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:32:22,772] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:32:22,772] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:32:22,772] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:32:22,772] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:32:22,772] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:32:22,772] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:32:22,772] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:32:22,772] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:32:22,772] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:32:22,772] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:32:22,772] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:32:22,773] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:32:25,983] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:32:25,984] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:32:25,984] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_GAm2x75bzKkTBLdzpRTIXUKu', type='function')], function_call=None) -[2024-11-07 16:32:25,984] [ERROR] [Engine]: An error occurred during chain-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 358, in chain_coordinate - if self.environment.is_task_completed(): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/environments/base_env.py", line 44, in _compare_to_ground_truth - return result.strip().lower() == ground_truth.strip().lower() -AttributeError: 'dict' object has no attribute 'strip' -[2024-11-07 16:32:25,987] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:32:25,988] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:32:25,988] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:32:25,988] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:50:03,047] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:50:03,047] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:50:03,047] [INFO] [BaseAgent]: Agent 'agent2' initialized. 
-[2024-11-07 16:50:03,047] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:50:03,047] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:50:03,047] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:50:03,047] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:50:03,047] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 16:50:03,047] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:50:03,047] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:50:03,047] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:50:03,047] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:50:03,048] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:50:03,048] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:50:03,048] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:50:03,048] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:50:03,048] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:50:03,048] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:50:03,048] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:50:03,048] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:50:03,048] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:50:05,051] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:50:05,052] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:50:05,052] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_n9Mp3wsmov82rSDwB46iUneG', type='function')], function_call=None) -[2024-11-07 16:50:05,053] [ERROR] [Engine]: An error occurred during chain-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 353, in chain_coordinate - if self.environment.is_task_completed(): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/environments/base_env.py", line 44, in _compare_to_ground_truth - return result.strip().lower() == ground_truth.strip().lower() -AttributeError: 'dict' object has no attribute 'strip' -[2024-11-07 16:50:05,056] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:50:05,056] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:50:05,056] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:50:05,056] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:50:29,520] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:50:29,520] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:50:29,520] [INFO] [BaseAgent]: Agent 'agent2' initialized. 
-[2024-11-07 16:50:29,520] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:50:29,520] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:50:29,520] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:50:29,520] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:50:29,520] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 16:50:29,521] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:50:29,521] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:50:29,521] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:50:29,521] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:50:29,521] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:50:29,521] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:50:29,521] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:50:29,521] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:50:29,521] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:50:29,521] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:50:29,521] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:50:29,521] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:50:29,521] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:50:31,312] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:50:31,313] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:50:31,313] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_49RHtvWHniOUURzMW60Mrkd9', type='function')], function_call=None) -[2024-11-07 16:50:31,313] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:50:32,349] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, should use Wikipedia to research and gather information on the top AI trends to watch out for in 2022. The agent can focus on summarizing key points, providing insights, and presenting the information in an organized manner for our team project.'. -[2024-11-07 16:50:32,349] [ERROR] [Engine]: An error occurred during chain-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 362, in chain_coordinate - self.planner.update_progress(summary) -UnboundLocalError: local variable 'summary' referenced before assignment -[2024-11-07 16:50:32,349] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:50:32,349] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:50:32,349] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:50:32,349] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:50:57,557] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:50:57,557] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. 
-[2024-11-07 16:50:57,557] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 16:50:57,557] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:50:57,557] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:50:57,557] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:50:57,557] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:50:57,557] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 16:50:57,557] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:50:57,557] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:50:57,557] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:50:57,557] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:50:57,558] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:50:57,558] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:50:57,558] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:50:57,558] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:50:57,558] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:50:57,558] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:50:57,558] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:50:57,558] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:50:57,558] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:50:59,386] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:50:59,387] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:50:59,387] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_iaZ5NejHQ2mykkDXCWakZg5B', type='function')], function_call=None) -[2024-11-07 16:50:59,388] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:51:00,674] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent should use the information from the Analytics Vidhya article to research and gather more in-depth details on the top AI trends to watch out for in 2022. They can use Wikipedia to find additional insights, statistics, and examples related to each trend mentioned in the article. The goal is to provide a comprehensive overview of each trend and its potential impact.'. -[2024-11-07 16:51:00,675] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_iaZ5NejHQ2mykkDXCWakZg5B', type='function')], function_call=None) -[2024-11-07 16:51:01,078] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:51:01,078] [ERROR] [Engine]: An error occurred during chain-based coordination. 
-Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 371, in chain_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 450, in _summarize_results - summary += f"- {json.dumps(result)}\n" - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/__init__.py", line 231, in dumps - return _default_encoder.encode(obj) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 199, in encode - chunks = self.iterencode(o, _one_shot=True) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 257, in iterencode - return _iterencode(o, 0) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 179, in default - raise TypeError(f'Object of type {o.__class__.__name__} ' -TypeError: Object of type Message is not JSON serializable -[2024-11-07 16:51:01,081] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:51:01,081] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:51:01,082] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:51:01,082] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:53:57,296] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:53:57,296] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:53:57,296] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 16:53:57,297] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:53:57,297] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:53:57,297] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:53:57,297] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:53:57,297] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-07 16:53:57,297] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:53:57,297] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:53:57,297] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:53:57,297] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:53:57,297] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:53:57,297] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:53:57,297] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:53:57,297] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:53:57,297] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:53:57,297] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:53:57,297] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:53:57,297] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:53:57,297] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:53:59,995] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:53:59,995] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:53:59,995] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ACHT5SoFT2AkIXnz19sYddJx', type='function')], function_call=None) -[2024-11-07 16:53:59,995] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:54:00,850] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, should utilize Wikipedia to research and gather information on the top AI trends to watch out for in 2022. They can focus on summarizing key points, trends, and insights from the Wikipedia sources to contribute to our task of understanding the latest AI trends.'. -[2024-11-07 16:54:00,851] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ACHT5SoFT2AkIXnz19sYddJx', type='function')], function_call=None) -[2024-11-07 16:54:01,303] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:54:01,303] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 16:54:01,304] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent, agent3, should utilize Wikipedia to research and gather information on the top AI trends to watch out for in 2022. They can focus on summarizing key points, trends, and insights from the Wikipedia sources to contribute to our task of understanding the latest AI trends.'. -[2024-11-07 16:54:02,196] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. 
-[2024-11-07 16:54:02,208] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plane coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short-term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network's inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore's law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as AlphaTensor, AlphaGeometry and AlphaProof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of 'what they know' to the question of 'what they're doing with it'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuel use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198] Nvidia CEO Jen-Hsun Huang said nuclear power is a good option for the data centers.[199]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[200] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[201]\n

After the last approval in September 2024, Taiwan suspended the approval of data centers north of Taoyuan with a capacity of more than 5 MW, due to power supply shortages.[202] On the other hand, Singapore imposed a ban on the opening of data centers in 2019 due to electric power, but in 2022, lifted this ban.[202]\n

Although most nuclear plants in Japan have been shut down after the 2011 Fukushima nuclear accident, according to an October 2024 Bloomberg article in Japanese, cloud gaming services company Ubitus, in which Nvidia has a stake, is looking for land in Japan near nuclear power plant for a new data center for generative AI. CEO Wesley Kuo said nuclear power plants are the most efficient, cheap and stable power for AI.[203]\n

On 1 November 2024, the Federal Energy Regulatory Commission (FERC) rejected an application submitted by Talen Energy for approval to supply some electricity from the nuclear power station Susquehanna to Amazon\'s data center.[204] \nAccording to the Commission Chairman Willie L. Phillips, it is a burden on the electricity grid as well as a significant cost shifting concern to households and other business sectors.[204]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[205] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[206] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[207] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[208]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[210] The developers may not be aware that the bias exists.[211] Bias can be introduced by the way training data is selected and by the way a model is deployed.[212][210] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[213] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[214] a problem called "sample size disparity".[215] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[216]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[217] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[219]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[220] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[221]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[222] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[215]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[209]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][224]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[225] Particularly with deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[226]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[227] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[228]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[229] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[230]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[231]\n

Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[232] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[233] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[234] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[235] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[236]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[238] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[238] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[239] By 2015, over fifty countries were reported to be researching battlefield robots.[240]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[241] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[242][243]\n

There are many other ways that AI is expected to help bad actors, some of which can not be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[244]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[245]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[246] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[247] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][249] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[245] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[250][251]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[252] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[253]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[254]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[255] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[257] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[258] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[259]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[260]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[261] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[262] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[263] He notably mentioned risks of an AI takeover,[264] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[265]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[266]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[267] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[268][269] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[270] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[271] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[272] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[273]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[274]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[275]\nThe field of machine ethics is also called computational morality,[275]\nand was founded at an AAAI symposium in 2005.[276]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[277] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[278]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[279] Google,[280] EleutherAI and Meta.[281] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[282][283] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[284] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[285]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute tests projects in four main areas:[286][287]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[288] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[289]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[290]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under an MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[291]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[292] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[293] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[294][295] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[296] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[296] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[296] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[297] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[298] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[299] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[300]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[294] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[301] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[302][303]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[304] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[305][306] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[307][308]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[309][310] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[312] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[313][310]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[310]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[317] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[318] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[319] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[321] and ongoing pressure from the U.S. Congress to fund more productive projects.[322] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[323] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[324] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[325] and began to look into "sub-symbolic" approaches.[326] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][331] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[332] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[333]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[334] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[335]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[337] graphics processing units, cloud computing[338]) and access to large amounts of data[339] (including curated datasets,[338] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[296]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[273]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[340] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[341] About 800,000 "AI"-related U.S. job openings existed in 2022.[342]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[343] Another major focus has been whether machines can be conscious, and the associated ethical implications.[344] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[345] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[344]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[346] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[346] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[313] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[347]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[348]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[349] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[350]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[351] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[352] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[353] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested in practice, that the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[354] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[355]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[357] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[358]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec's paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[359] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[360] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[362][363] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[364] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field's long-term goals.[365][366] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[367] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[368] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett's consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[369]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[370]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[374]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[375] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[376][377] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[376] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[378]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[379] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[380][381]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[377][376]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[366] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[382]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[383]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[384]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler's "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[385]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum's Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[386] and have been a persistent theme in science fiction.[387]\n

A common trope in these works began with Mary Shelley's Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke's and Stanley Kubrick's 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[388]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov's laws are often brought up during lay discussions of machine ethics;[389] while almost all artificial intelligence researchers are familiar with Asimov's laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[390]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek's R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[391]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[209]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[218]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you're trying to design interventions and mechanisms that change the world."[223]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[237]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[248]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[256]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[309][311]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[314] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[315]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[316]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel's checkers program for the IBM 701, Daniel Bobrow's STUDENT, Newell and Simon's Logic Theorist and Terry Winograd's SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[320]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[327] were championed by Hans Moravec[328] and Rodney Brooks[329] and went by many names: Nouvelle AI.[329] Developmental robotics.[330]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[336]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[338]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[356]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus's comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[361]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[371] Searle's original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[372] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[373]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who's the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024. PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Kendall, Tyler (28 September 2024). "Nvidia\'s Huang Says Nuclear Power an Option to Feed Data Centers". Bloomberg.\n
  398. \n
  399. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  400. \n
  401. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  402. \n
  403. ^ a b Niva Yadav (19 August 2024). "Taiwan to stop large data centers in the North, cites insufficient power". DatacenterDynamics.\n
  404. \n
  405. ^ Mochizuki, Takashi; Oda, Shoko (18 October 2024). "エヌビディア出資の日本企業、原発近くでAIデータセンター新設検討". Bloomberg (in Japanese).\n
  406. \n
  407. ^ a b Naureen S Malik and Will Wade (5 November 2024). "Nuclear-Hungry AI Campuses Need New Plan to Find Power Fast". Bloomberg.\n
  408. \n
  409. ^ Nicas (2018).\n
  410. \n
  411. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  412. \n
  413. ^ Williams (2023).\n
  414. \n
  415. ^ Taylor & Hern (2023).\n
  416. \n
  417. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  418. \n
  419. ^ a b Rose (2023).\n
  420. \n
  421. ^ CNA (2019).\n
  422. \n
  423. ^ Goffrey (2008), p. 17.\n
  424. \n
  425. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  426. \n
  427. ^ Christian (2020), p. 25.\n
  428. \n
  429. ^ a b Russell & Norvig (2021), p. 995.\n
  430. \n
  431. ^ Grant & Hill (2023).\n
  432. \n
  433. ^ Larson & Angwin (2016).\n
  434. \n
  435. ^ Christian (2020), pp. 67–70.\n
  436. \n
  437. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  438. \n
  439. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  440. \n
  441. ^ Quoted in Christian (2020, p. 65).\n
  442. \n
  443. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  444. \n
  445. ^ Quoted in Christian (2020, p. 80)\n
  446. \n
  447. ^ Dockrill (2022).\n
  448. \n
  449. ^ Sample (2017).\n
  450. \n
  451. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  452. \n
  453. ^ Christian (2020), p. 110.\n
  454. \n
  455. ^ Christian (2020), pp. 88–91.\n
  456. \n
  457. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  458. \n
  459. ^ Christian (2020), p. 91.\n
  460. \n
  461. ^ Christian (2020), p. 83.\n
  462. \n
  463. ^ Verma (2021).\n
  464. \n
  465. ^ Rothman (2020).\n
  466. \n
  467. ^ Christian (2020), pp. 105–108.\n
  468. \n
  469. ^ Christian (2020), pp. 108–112.\n
  470. \n
  471. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  472. \n
  473. ^ Russell & Norvig (2021), p. 989.\n
  474. \n
  475. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  476. \n
  477. ^ Russell & Norvig (2021), p. 988.\n
  478. \n
  479. ^ Robitzski (2018); Sainato (2015)\n
  480. \n
  481. ^ Harari (2018).\n
  482. \n
  483. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  484. \n
  485. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  486. \n
  487. ^ Urbina et al. (2022).\n
  488. \n
  489. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  490. \n
  491. ^ Ford & Colvin (2015); McGaughey (2022)\n
  492. \n
  493. ^ IGM Chicago (2017).\n
  494. \n
  495. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  496. \n
  497. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  498. \n
  499. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  500. \n
  501. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  502. \n
  503. ^ Morgenstern (2015).\n
  504. \n
  505. ^ Mahdawi (2017); Thompson (2014)\n
  506. \n
  507. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  508. \n
  509. ^ Cellan-Jones (2014).\n
  510. \n
  511. ^ Russell & Norvig 2021, p. 1001.\n
  512. \n
  513. ^ Bostrom (2014).\n
  514. \n
  515. ^ Russell (2019).\n
  516. \n
  517. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  518. \n
  519. ^ Harari (2023).\n
  520. \n
  521. ^ Müller & Bostrom (2014).\n
  522. \n
  523. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  524. \n
  525. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  526. \n
  527. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  528. \n
  529. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  530. \n
  531. ^ Valance (2023).\n
  532. \n
  533. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  534. \n
  535. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  536. \n
  537. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  538. \n
  539. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  540. \n
  541. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  542. \n
  543. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  544. \n
  545. ^ a b Christian (2020), pp. 67, 73.\n
  546. \n
  547. ^ Yudkowsky (2008).\n
  548. \n
  549. ^ a b Anderson & Anderson (2011).\n
  550. \n
  551. ^ AAAI (2014).\n
  552. \n
  553. ^ Wallach (2010).\n
  554. \n
  555. ^ Russell (2019), p. 173.\n
  556. \n
  557. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  560. \n
  561. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  566. \n
  567. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  568. \n
  569. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  570. \n
  571. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  574. \n
  575. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  576. \n
  577. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  578. \n
  579. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  580. \n
  581. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  582. \n
  583. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  584. \n
  585. ^ a b Vincent (2023).\n
  586. \n
  587. ^ Stanford University (2023).\n
  588. \n
  589. ^ a b c d UNESCO (2021).\n
  590. \n
  591. ^ Kissinger (2021).\n
  592. \n
  593. ^ Altman, Brockman & Sutskever (2023).\n
  594. \n
  595. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  596. \n
  597. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  598. \n
  599. ^ Edwards (2023).\n
  600. \n
  601. ^ Kasperowicz (2023).\n
  602. \n
  603. ^ Fox News (2023).\n
  604. \n
  605. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  606. \n
  607. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  608. \n
  609. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  610. \n
  611. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  612. \n
  613. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  614. \n
  615. ^ a b Russell & Norvig 2021, p. 9.\n
  616. \n
  617. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  618. \n
  619. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  620. \n
  621. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  622. \n
  623. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  624. \n
  625. ^ Crevier (1993), pp. 47–49.\n
  626. \n
  627. ^ Russell & Norvig (2003), p. 17.\n
  628. \n
  629. ^ Russell & Norvig (2003), p. 18.\n
  630. \n
  631. ^ Newquist (1994), p. 86.\n
  632. \n
  633. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  634. \n
  635. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  636. \n
  637. ^ Russell & Norvig (2021), p. 21.\n
  638. \n
  639. ^ Lighthill (1973).\n
  640. \n
  641. ^ NRC 1999, pp. 212–213.\n
  642. \n
  643. ^ Russell & Norvig (2021), p. 22.\n
  644. \n
  645. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  646. \n
  647. ^ Russell & Norvig (2021), p. 24.\n
  648. \n
  649. ^ Nilsson (1998), p. 7.\n
  650. \n
  651. ^ McCorduck (2004), pp. 454–462.\n
  652. \n
  653. ^ Moravec (1988).\n
  654. \n
  655. ^ a b Brooks (1990).\n
  656. \n
  657. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  658. \n
  659. ^ Russell & Norvig (2021), p. 25.\n
  660. \n
  661. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  662. \n
  663. ^ Russell & Norvig (2021), p. 26.\n
  664. \n
  665. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  666. \n
  667. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  668. \n
  669. ^ Wong (2023).\n
  670. \n
  671. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  672. \n
  673. ^ a b c Clark (2015b).\n
  674. \n
  675. ^ Big data: Russell & Norvig (2021, p. 26)\n
  676. \n
  677. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  678. \n
  679. ^ DiFeliciantonio (2023).\n
  680. \n
  681. ^ Goswami (2023).\n
  682. \n
  683. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  684. \n
  685. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  686. \n
  687. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  688. \n
  689. ^ a b Turing (1950), p. 1.\n
  690. \n
  691. ^ Turing (1950), Under "The Argument from Consciousness".\n
  692. \n
  693. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  694. \n
  695. ^ Russell & Norvig (2021), p. 3.\n
  696. \n
  697. ^ Maker (2006).\n
  698. \n
  699. ^ McCarthy (1999).\n
  700. \n
  701. ^ Minsky (1986).\n
  702. \n
  703. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  704. \n
  705. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  706. \n
  707. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  708. \n
  709. ^ Nilsson (1983), p. 10.\n
  710. \n
  711. ^ Haugeland (1985), pp. 112–117.\n
  712. \n
  713. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  714. \n
  715. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  716. \n
  717. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  718. \n
  719. ^ Crevier (1993), p. 125.\n
  720. \n
  721. ^ Langley (2011).\n
  722. \n
  723. ^ Katz (2012).\n
  724. \n
  725. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  726. \n
  727. ^ Pennachin & Goertzel (2007).\n
  728. \n
  729. ^ a b Roberts (2016).\n
  730. \n
  731. ^ Russell & Norvig (2021), p. 986.\n
  732. \n
  733. ^ Chalmers (1995).\n
  734. \n
  735. ^ Dennett (1991).\n
  736. \n
  737. ^ Horst (2005).\n
  738. \n
  739. ^ Searle (1999).\n
  740. \n
  741. ^ Searle (1980), p. 1.\n
  742. \n
  743. ^ Russell & Norvig (2021), p. 9817.\n
  744. \n
  745. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  746. \n
  747. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  754. \n
  755. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  756. \n
  757. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  758. \n
  759. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  760. \n
  761. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  762. \n
  763. ^ Russell & Norvig (2021), p. 1005.\n
  764. \n
  765. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  766. \n
  767. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  768. \n
  769. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  770. \n
  771. ^ McCorduck (2004), pp. 340–400.\n
  772. \n
  773. ^ Buttazzo (2001).\n
  774. \n
  775. ^ Anderson (2008).\n
  776. \n
  777. ^ McCauley (2007).\n
  778. \n
  779. ^ Galvan (1997).\n
  780. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-07 16:54:02,810] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_xAhd02lIKJwbT5XBzGnFlWub', type='function')], function_call=None) -[2024-11-07 16:54:02,811] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 16:54:03,853] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, will use Bing search engine to gather additional information on artificial intelligence, focusing on recent developments, applications, and impact on various industries. The agent should also compare different perspectives on the future of artificial intelligence and summarize key points for the team.'. -[2024-11-07 16:54:03,853] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ACHT5SoFT2AkIXnz19sYddJx', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_xAhd02lIKJwbT5XBzGnFlWub', type='function')], function_call=None) -[2024-11-07 16:54:04,271] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-07 16:54:04,272] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-07 16:54:04,272] [ERROR] [Engine]: An error occurred during chain-based coordination. 
-Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 371, in chain_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 450, in _summarize_results - summary += f"- {json.dumps(result)}\n" - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/__init__.py", line 231, in dumps - return _default_encoder.encode(obj) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 199, in encode - chunks = self.iterencode(o, _one_shot=True) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 257, in iterencode - return _iterencode(o, 0) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 179, in default - raise TypeError(f'Object of type {o.__class__.__name__} ' -TypeError: Object of type Message is not JSON serializable -[2024-11-07 16:54:04,278] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:54:04,278] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:54:04,280] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:54:04,280] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:54:41,709] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:54:41,709] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:54:41,709] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 16:54:41,709] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:54:41,709] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:54:41,709] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:54:41,709] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:54:41,709] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-07 16:54:41,709] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:54:41,709] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:54:41,709] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:54:41,709] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:54:41,709] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:54:41,709] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:54:41,709] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:54:41,709] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:54:41,709] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:54:41,709] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:54:41,709] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:54:41,709] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:54:41,709] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:54:44,501] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:54:44,502] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:54:44,502] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_AVdiDjBnWm1AavuGyxrq4Qia', type='function')], function_call=None) -[2024-11-07 16:54:44,502] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:54:45,545] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, will use Wikipedia to research the top AI trends to watch out for in 2022. The agent should focus on gathering detailed and accurate information about the AI trends, their implications, and potential applications in various industries. The agent can also look for any recent developments or advancements in AI technology that are shaping these trends.'. -[2024-11-07 16:54:45,546] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_AVdiDjBnWm1AavuGyxrq4Qia', type='function')], function_call=None) -[2024-11-07 16:54:45,895] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:54:45,896] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 16:54:45,896] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent, agent3, will use Wikipedia to research the top AI trends to watch out for in 2022. The agent should focus on gathering detailed and accurate information about the AI trends, their implications, and potential applications in various industries. 
The agent can also look for any recent developments or advancements in AI technology that are shaping these trends.'. -[2024-11-07 16:54:46,632] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-07 16:54:46,644] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198] Nvidia CEO Jen-Hsun Huang said nuclear power is a good option for the data centers.[199]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[200] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[201]\n

After the last approval in September 2024, Taiwan suspended the approval of data centers north of Taoyuan with a capacity of more than 5 MW, due to power supply shortages.[202] On the other hand, Singapore imposed a ban on the opening of data centers in 2019 due to electric power, but in 2022, lifted this ban.[202]\n

Although most nuclear plants in Japan have been shut down after the 2011 Fukushima nuclear accident, according to an October 2024 Bloomberg article in Japanese, cloud gaming services company Ubitus, in which Nvidia has a stake, is looking for land in Japan near nuclear power plant for a new data center for generative AI. CEO Wesley Kuo said nuclear power plants are the most efficient, cheap and stable power for AI.[203]\n

On 1 November 2024, the Federal Energy Regulatory Commission (FERC) rejected an application submitted by Talen Energy for approval to supply some electricity from the nuclear power station Susquehanna to Amazon\'s data center.[204] \nAccording to the Commission Chairman Willie L. Phillips, it is a burden on the electricity grid as well as a significant cost shifting concern to households and other business sectors.[204]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[205] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[206] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[207] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[208]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[210] The developers may not be aware that the bias exists.[211] Bias can be introduced by the way training data is selected and by the way a model is deployed.[212][210] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[213] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[214] a problem called "sample size disparity".[215] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[216]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[217] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[219]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[220] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[221]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[222] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[215]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[209]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][224]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[225] Particularly with deep neural networks, in which there are a large amount of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[226]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[227] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[228]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[229] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[230]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[231]\n

Several approaches aim to address the transparency problem. SHAP enables to visualise the contribution of each feature to the output.[232] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[233] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[234] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[235] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[236]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[238] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and they could potentially kill an innocent person.[238] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[239] By 2015, over fifty countries were reported to be researching battlefield robots.[240]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[241] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[242][243]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[244]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[245]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[246] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[247] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][249] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[245] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[250][251]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[252] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[253]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[254]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[255] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[257] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[258] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[259]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[260]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[261] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[262] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[263] He notably mentioned risks of an AI takeover,[264] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[265]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[266]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[267] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[268][269] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[270] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[271] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[272] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[273]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[274]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[275]\nThe field of machine ethics is also called computational morality,[275]\nand was founded at an AAAI symposium in 2005.[276]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[277] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[278]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[279] Google,[280] EleutherAI and Meta.[281] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[282][283] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[284] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[285]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework, containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[286][287]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[288] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[289]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[290]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[291]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[292] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[293] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[294][295] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[296] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[296] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[296] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[297] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[298] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[299] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[300]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[294] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[301] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[302][303]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[304] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[305][306] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[307][308]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[309][310] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[312] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[313][310]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[310]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[317] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[318] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[319] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[321] and ongoing pressure from the U.S. Congress to fund more productive projects.[322] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[323] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[324] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[325] and began to look into "sub-symbolic" approaches.[326] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][331] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[332] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[333]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[334] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[335]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[337] graphics processing units, cloud computing[338]) and access to large amounts of data[339] (including curated datasets,[338] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[296]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[273]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2016, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[340] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[341] About 800,000 "AI"-related U.S. job openings existed in 2022.[342]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[343] Another major focus has been whether machines can be conscious, and the associated ethical implications.[344] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[345] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[344]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[346] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[346] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[313] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[347]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[348]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[349] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[350]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[351] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[352] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[353] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[354] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[355]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[357] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[358]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[359] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[360] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[362][363] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[364] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[365][366] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[367] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[368] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[369]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[370]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[374]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[375] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[376][377] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[376] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[378]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[379] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[380][381]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[377][376]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[366] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[382]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[383]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[384]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[385]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[386] and have been a persistent theme in science fiction.[387]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[388]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[389] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[390]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[391]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[209]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[218]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[223]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[237]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[248]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[256]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[309][311]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[314] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[315]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[316]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[320]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[327] were championed by Hans Moravec[328] and Rodney Brooks[329] and went by many names: Nouvelle AI.[329] Developmental robotics.[330]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[336]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[338]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[356]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[361]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[371] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[372] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[373]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming YS Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Kendall, Tyler (28 September 2024). "Nvidia\'s Huang Says Nuclear Power an Option to Feed Data Centers". Bloomberg.\n
  398. \n
  399. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  400. \n
  401. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  402. \n
  403. ^ a b Niva Yadav (19 August 2024). "Taiwan to stop large data centers in the North, cites insufficient power". DatacenterDynamics.\n
  404. \n
  405. ^ Mochizuki, Takashi; Oda, Shoko (18 October 2024). "エヌビディア出資の日本企業、原発近くでAIデータセンター新設検討". Bloomberg (in Japanese).\n
  406. \n
  407. ^ a b Naureen S Malik and Will Wade (5 November 2024). "Nuclear-Hungry AI Campuses Need New Plan to Find Power Fast". Bloomberg.\n
  408. \n
  409. ^ Nicas (2018).\n
  410. \n
  411. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  412. \n
  413. ^ Williams (2023).\n
  414. \n
  415. ^ Taylor & Hern (2023).\n
  416. \n
  417. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  418. \n
  419. ^ a b Rose (2023).\n
  420. \n
  421. ^ CNA (2019).\n
  422. \n
  423. ^ Goffrey (2008), p. 17.\n
  424. \n
  425. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  426. \n
  427. ^ Christian (2020), p. 25.\n
  428. \n
  429. ^ a b Russell & Norvig (2021), p. 995.\n
  430. \n
  431. ^ Grant & Hill (2023).\n
  432. \n
  433. ^ Larson & Angwin (2016).\n
  434. \n
  435. ^ Christian (2020), p. 67–70.\n
  436. \n
  437. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  438. \n
  439. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  440. \n
  441. ^ Quoted in Christian (2020, p. 65).\n
  442. \n
  443. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  444. \n
  445. ^ Quoted in Christian (2020, p. 80)\n
  446. \n
  447. ^ Dockrill (2022).\n
  448. \n
  449. ^ Sample (2017).\n
  450. \n
  451. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  452. \n
  453. ^ Christian (2020), p. 110.\n
  454. \n
  455. ^ Christian (2020), pp. 88–91.\n
  456. \n
  457. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  458. \n
  459. ^ Christian (2020), p. 91.\n
  460. \n
  461. ^ Christian (2020), p. 83.\n
  462. \n
  463. ^ Verma (2021).\n
  464. \n
  465. ^ Rothman (2020).\n
  466. \n
  467. ^ Christian (2020), pp. 105–108.\n
  468. \n
  469. ^ Christian (2020), pp. 108–112.\n
  470. \n
  471. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  472. \n
  473. ^ Russell & Norvig (2021), p. 989.\n
  474. \n
  475. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  476. \n
  477. ^ Russell & Norvig (2021), p. 988.\n
  478. \n
  479. ^ Robitzski (2018); Sainato (2015)\n
  480. \n
  481. ^ Harari (2018).\n
  482. \n
  483. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  484. \n
  485. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  486. \n
  487. ^ Urbina et al. (2022).\n
  488. \n
  489. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  490. \n
  491. ^ Ford & Colvin (2015);McGaughey (2022)\n
  492. \n
  493. ^ IGM Chicago (2017).\n
  494. \n
  495. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  496. \n
  497. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  498. \n
  499. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  500. \n
  501. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  502. \n
  503. ^ Morgenstern (2015).\n
  504. \n
  505. ^ Mahdawi (2017); Thompson (2014)\n
  506. \n
  507. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  508. \n
  509. ^ Cellan-Jones (2014).\n
  510. \n
  511. ^ Russell & Norvig 2021, p. 1001.\n
  512. \n
  513. ^ Bostrom (2014).\n
  514. \n
  515. ^ Russell (2019).\n
  516. \n
  517. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  518. \n
  519. ^ Harari (2023).\n
  520. \n
  521. ^ Müller & Bostrom (2014).\n
  522. \n
  523. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  524. \n
  525. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  526. \n
  527. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  528. \n
  529. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  530. \n
  531. ^ Valance (2023).\n
  532. \n
  533. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  534. \n
  535. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  536. \n
  537. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  538. \n
  539. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  540. \n
  541. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  542. \n
  543. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  544. \n
  545. ^ a b Christian (2020), pp. 67, 73.\n
  546. \n
  547. ^ Yudkowsky (2008).\n
  548. \n
  549. ^ a b Anderson & Anderson (2011).\n
  550. \n
  551. ^ AAAI (2014).\n
  552. \n
  553. ^ Wallach (2010).\n
  554. \n
  555. ^ Russell (2019), p. 173.\n
  556. \n
  557. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  560. \n
  561. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  566. \n
  567. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  568. \n
  569. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  570. \n
  571. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  574. \n
  575. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  576. \n
  577. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  578. \n
  579. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  580. \n
  581. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  582. \n
  583. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  584. \n\n
  585. ^ a b Vincent (2023).\n
  586. \n
  587. ^ Stanford University (2023).\n
  588. \n
  589. ^ a b c d UNESCO (2021).\n
  590. \n
  591. ^ Kissinger (2021).\n
  592. \n
  593. ^ Altman, Brockman & Sutskever (2023).\n
  594. \n
  595. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  596. \n
  597. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  598. \n
  599. ^ Edwards (2023).\n
  600. \n
  601. ^ Kasperowicz (2023).\n
  602. \n
  603. ^ Fox News (2023).\n
  604. \n
  605. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  606. \n
  607. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  608. \n
  609. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  610. \n
  611. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  612. \n
  613. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  614. \n
  615. ^ a b Russell & Norvig 2021, p. 9.\n
  616. \n
  617. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  618. \n
  619. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  620. \n
  621. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  622. \n
  623. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  624. \n
  625. ^ Crevier (1993), pp. 47–49.\n
  626. \n
  627. ^ Russell & Norvig (2003), p. 17.\n
  628. \n
  629. ^ Russell & Norvig (2003), p. 18.\n
  630. \n
  631. ^ Newquist (1994), pp. 86–86.\n
  632. \n
  633. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  634. \n
  635. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  636. \n
  637. ^ Russell & Norvig (2021), p. 21.\n
  638. \n
  639. ^ Lighthill (1973).\n
  640. \n
  641. ^ NRC 1999, pp. 212–213.\n
  642. \n
  643. ^ Russell & Norvig (2021), p. 22.\n
  644. \n
  645. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  646. \n
  647. ^ Russell & Norvig (2021), p. 24.\n
  648. \n
  649. ^ Nilsson (1998), p. 7.\n
  650. \n
  651. ^ McCorduck (2004), pp. 454–462.\n
  652. \n
  653. ^ Moravec (1988).\n
  654. \n
  655. ^ a b Brooks (1990).\n
  656. \n
  657. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  658. \n
  659. ^ Russell & Norvig (2021), p. 25.\n
  660. \n
  661. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  662. \n
  663. ^ Russell & Norvig (2021), p. 26.\n
  664. \n
  665. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  666. \n
  667. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  668. \n
  669. ^ Wong (2023).\n
  670. \n
  671. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  672. \n
  673. ^ a b c Clark (2015b).\n
  674. \n
  675. ^ Big data: Russell & Norvig (2021, p. 26)\n
  676. \n
  677. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  678. \n
  679. ^ DiFeliciantonio (2023).\n
  680. \n
  681. ^ Goswami (2023).\n
  682. \n
  683. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  684. \n
  685. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  686. \n
  687. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  688. \n
  689. ^ a b Turing (1950), p. 1.\n
  690. \n
  691. ^ Turing (1950), Under "The Argument from Consciousness".\n
  692. \n
  693. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  694. \n
  695. ^ Russell & Norvig (2021), p. 3.\n
  696. \n
  697. ^ Maker (2006).\n
  698. \n
  699. ^ McCarthy (1999).\n
  700. \n
  701. ^ Minsky (1986).\n
  702. \n
  703. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  704. \n
  705. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  706. \n
  707. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  708. \n
  709. ^ Nilsson (1983), p. 10.\n
  710. \n
  711. ^ Haugeland (1985), pp. 112–117.\n
  712. \n
  713. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  714. \n
  715. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  716. \n
  717. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  718. \n
  719. ^ Crevier (1993), p. 125.\n
  720. \n
  721. ^ Langley (2011).\n
  722. \n
  723. ^ Katz (2012).\n
  724. \n
  725. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  726. \n
  727. ^ Pennachin & Goertzel (2007).\n
  728. \n
  729. ^ a b Roberts (2016).\n
  730. \n
  731. ^ Russell & Norvig (2021), p. 986.\n
  732. \n
  733. ^ Chalmers (1995).\n
  734. \n
  735. ^ Dennett (1991).\n
  736. \n
  737. ^ Horst (2005).\n
  738. \n
  739. ^ Searle (1999).\n
  740. \n
  741. ^ Searle (1980), p. 1.\n
  742. \n
  743. ^ Russell & Norvig (2021), p. 9817.\n
  744. \n
  745. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  746. \n
  747. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  754. \n
  755. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  756. \n
  757. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  758. \n
  759. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  760. \n
  761. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  762. \n
  763. ^ Russell & Norvig (2021), p. 1005.\n
  764. \n
  765. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  766. \n
  767. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  768. \n
  769. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  770. \n
  771. ^ McCorduck (2004), pp. 340–400.\n
  772. \n
  773. ^ Buttazzo (2001).\n
  774. \n
  775. ^ Anderson (2008).\n
  776. \n
  777. ^ McCauley (2007).\n
  778. \n
  779. ^ Galvan (1997).\n
  780. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-07 16:54:46,913] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -[2024-11-07 16:54:46,913] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 16:54:47,898] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent should use Bing search results on Artificial Intelligence to gather information on recent advancements and applications in the field. They can look for articles, case studies, or expert opinions to deepen our understanding of the topic.'. -[2024-11-07 16:54:47,899] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_AVdiDjBnWm1AavuGyxrq4Qia', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -[2024-11-07 16:54:48,305] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:54:48,305] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:54:48,306] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next agent should use Bing search results on Artificial Intelligence to gather information on recent advancements and applications in the field. 
They can look for articles, case studies, or expert opinions to deepen our understanding of the topic.'. -[2024-11-07 16:54:49,278] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.bing.com/search?q=Artificial+Intelligence+recent+advancements+and+applications'}'. -[2024-11-07 16:54:49,280] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.bing.com/search?q=Artificial+Intelligence+recent+advancements+and+applications', 'content': 'Artificial Intelligence recent advancements and applications - Search
  1. There are no results for Artificial Intelligence recent advancements and applications

    • Check your spelling or try different keywords

    Ref A: 6104EF5C8BA74B289550677BF75A3C6F Ref B: CH1EDGE1014 Ref C: 2024-11-07T22:54:49Z

'}'. -[2024-11-07 16:54:49,298] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=Artificial+Intelligence+recent+advancements+and+applications"}', name='fetch_webpage'), id='call_y3LkQkYul0kGwgKniULn9Xlv', type='function')], function_call=None) -[2024-11-07 16:54:49,298] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:54:50,116] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next planning task is for agent3 to utilize the information gathered from Wikipedia on recent advancements and applications of Artificial Intelligence to create a summary highlighting key points, trends, and potential future developments in the field.'. -[2024-11-07 16:54:50,117] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_AVdiDjBnWm1AavuGyxrq4Qia', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=Artificial+Intelligence+recent+advancements+and+applications"}', name='fetch_webpage'), id='call_y3LkQkYul0kGwgKniULn9Xlv', type='function')], function_call=None) -[2024-11-07 16:54:50,558] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:54:50,558] [INFO] [Engine]: 
Agent 'agent3' is executing task. -[2024-11-07 16:54:50,559] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next planning task is for agent3 to utilize the information gathered from Wikipedia on recent advancements and applications of Artificial Intelligence to create a summary highlighting key points, trends, and potential future developments in the field.'. -[2024-11-07 16:54:51,173] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-07 16:54:51,174] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.bing.com/search?q=Artificial+Intelligence+recent+advancements+and+applications', 'content': 'Artificial Intelligence recent advancements and applications - Search
  1. There are no results for Artificial Intelligence recent advancements and applications

    • Check your spelling or try different keywords

    Ref A: 6104EF5C8BA74B289550677BF75A3C6F Ref B: CH1EDGE1014 Ref C: 2024-11-07T22:54:49Z

'}'. -[2024-11-07 16:54:51,178] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_yLpAF6qZ3xuty5gYVupYm5T0', type='function')], function_call=None) -[2024-11-07 16:54:51,178] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 16:54:52,298] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, who is a helpful and supportive team leader, should use Bing to search for more in-depth information on artificial intelligence, focusing on its applications in various industries, recent advancements, and future prospects. The agent can gather insights to create a comprehensive overview of the topic and present key points for the team's research project.'. -[2024-11-07 16:54:52,299] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_AVdiDjBnWm1AavuGyxrq4Qia', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=Artificial+Intelligence+recent+advancements+and+applications"}', name='fetch_webpage'), id='call_y3LkQkYul0kGwgKniULn9Xlv', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_yLpAF6qZ3xuty5gYVupYm5T0', type='function')], function_call=None) -[2024-11-07 16:54:52,708] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-07 16:54:52,709] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-07 16:54:52,709] [ERROR] [Engine]: An error occurred during chain-based coordination. 
-Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 369, in chain_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 448, in _summarize_results - result = json.dumps(result) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/__init__.py", line 231, in dumps - return _default_encoder.encode(obj) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 199, in encode - chunks = self.iterencode(o, _one_shot=True) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 257, in iterencode - return _iterencode(o, 0) - File "/opt/anaconda3/envs/mabench/lib/python3.10/json/encoder.py", line 179, in default - raise TypeError(f'Object of type {o.__class__.__name__} ' -TypeError: Object of type Message is not JSON serializable -[2024-11-07 16:54:52,712] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:54:52,712] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:54:52,713] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:54:52,713] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:57:21,196] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:57:21,197] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:57:21,197] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 16:57:21,197] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:57:21,197] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:57:21,197] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 16:57:21,197] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:57:21,197] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-07 16:57:21,197] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:57:21,197] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:57:21,197] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:57:21,197] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:57:21,197] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:57:21,197] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:57:21,197] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:57:21,197] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:57:21,197] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:57:21,197] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:57:21,197] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:57:21,197] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:57:21,197] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:57:23,030] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:57:23,031] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:57:23,031] [INFO] [Engine]: Agent 'agent1' completed task with result: -[2024-11-07 16:57:23,032] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. 
-[2024-11-07 16:57:23,852] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent should delve deeper into the topic by researching the historical background and cultural significance of the Mexican Day of the Dead festival. They should also explore any unique traditions or customs associated with the celebration.'. -[2024-11-07 16:57:23,852] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. - -[2024-11-07 16:57:24,261] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-07 16:57:24,262] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-07 16:57:24,262] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {"agent1": ""} - -[2024-11-07 16:57:24,262] [INFO] [Engine]: Chain execution Summary: -Agents' Results Summary: -- {"agent1": ""} - -[2024-11-07 16:57:24,262] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. - -Agents' Results Summary: -- {"agent1": ""} - -[2024-11-07 16:57:24,263] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:57:24,263] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:57:24,263] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:57:24,263] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:57:24,263] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:58:04,408] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:58:04,409] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:58:04,409] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 16:58:04,409] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:58:04,409] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:58:04,409] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. 
-[2024-11-07 16:58:04,409] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:58:04,409] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 16:58:04,409] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:58:04,409] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:58:04,409] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:58:04,409] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:58:04,409] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:58:04,409] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:58:04,409] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:58:04,409] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:58:04,409] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:58:04,409] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:58:04,409] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:58:04,409] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:58:04,409] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:58:06,812] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:58:06,812] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:58:06,813] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_86o0aB6BCxSa7eXe5iDhbQ4O', type='function')], function_call=None) -[2024-11-07 16:58:06,813] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:58:07,775] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, should use the information from the fetched webpage to research and gather more details about the top AI trends to watch out for in 2022. They can use Wikipedia to delve deeper into specific trends, key players, and potential impacts of these AI trends.'. -[2024-11-07 16:58:07,776] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_86o0aB6BCxSa7eXe5iDhbQ4O', type='function')], function_call=None) -[2024-11-07 16:58:08,365] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:58:08,365] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 16:58:08,366] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent, agent3, should use the information from the fetched webpage to research and gather more details about the top AI trends to watch out for in 2022. They can use Wikipedia to delve deeper into specific trends, key players, and potential impacts of these AI trends.'. 
-[2024-11-07 16:58:09,353] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b'}'. -[2024-11-07 16:58:09,354] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b'}'. -[2024-11-07 16:58:09,354] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b"}', name='fetch_webpage'), id='call_fo1OlgUItVbiWPDw9N1BlzXO', type='function')], function_call=None) -[2024-11-07 16:58:09,354] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 16:58:10,505] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, who is a helpful and supportive team leader, should analyze the AI trends for 2022 from the Forbes article and create a summary highlighting the key points and trends to watch out for in the upcoming year. The agent can also provide insights on how these trends may impact various industries and suggest potential opportunities or challenges that businesses may face as a result of these AI advancements.'. -[2024-11-07 16:58:10,506] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_86o0aB6BCxSa7eXe5iDhbQ4O', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b"}', name='fetch_webpage'), id='call_fo1OlgUItVbiWPDw9N1BlzXO', type='function')], function_call=None) -[2024-11-07 16:58:10,792] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:58:10,793] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:58:10,793] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next agent, agent1, who is a helpful and supportive team leader, should analyze the AI trends for 2022 from the Forbes article and create a summary highlighting the key points and trends to watch out for in the upcoming year. The agent can also provide insights on how these trends may impact various industries and suggest potential opportunities or challenges that businesses may face as a result of these AI advancements.'. -[2024-11-07 16:58:11,848] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.forbes.com/sites/forbestechcouncil/2021/12/14/ai-trends-to-watch-in-2022/?sh=3b4b4b7b6b6b'}'. -[2024-11-07 16:58:11,848] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.forbes.com/sites/forbestechcouncil/2021/12/14/ai-trends-to-watch-in-2022/?sh=3b4b4b7b6b6b'}'. 
-[2024-11-07 16:58:11,849] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/14/ai-trends-to-watch-in-2022/?sh=3b4b4b7b6b6b"}', name='fetch_webpage'), id='call_Gv3Ss5qpWS7jLKpNXFJyUDod', type='function')], function_call=None) -[2024-11-07 16:58:11,849] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:58:12,799] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, should use Wikipedia to research and gather information on the key AI trends to watch in 2022. They can focus on summarizing the trends, providing insights, and highlighting any notable developments in the field.'. -[2024-11-07 16:58:12,799] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_86o0aB6BCxSa7eXe5iDhbQ4O', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b"}', name='fetch_webpage'), id='call_fo1OlgUItVbiWPDw9N1BlzXO', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/14/ai-trends-to-watch-in-2022/?sh=3b4b4b7b6b6b"}', name='fetch_webpage'), id='call_Gv3Ss5qpWS7jLKpNXFJyUDod', type='function')], function_call=None) -[2024-11-07 16:58:13,313] [DEBUG] [EnginePlanner]: Received continuation decision: 
{'continue': True} -[2024-11-07 16:58:13,313] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 16:58:13,314] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent, agent3, should use Wikipedia to research and gather information on the key AI trends to watch in 2022. They can focus on summarizing the trends, providing insights, and highlighting any notable developments in the field.'. -[2024-11-07 16:58:14,083] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-07 16:58:14,096] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network's inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore's law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple's Face ID or Microsoft's DeepFace and Google's FaceNet) and image labeling (used by Facebook, Apple's iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson's disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson's disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI's most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM's question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind's AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world's best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as AlphaTensor, AlphaGeometry and AlphaProof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I'm not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of 'what they know' to the question of 'what they're doing with it'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198] Nvidia CEO Jen-Hsun Huang said nuclear power is a good option for the data centers.[199]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[200] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[201]\n

After the last approval in September 2024, Taiwan suspended the approval of data centers north of Taoyuan with a capacity of more than 5 MW, due to power supply shortages.[202] On the other hand, Singapore imposed a ban on the opening of data centers in 2019 due to electric power, but in 2022, lifted this ban.[202]\n

Although most nuclear plants in Japan have been shut down after the 2011 Fukushima nuclear accident, according to an October 2024 Bloomberg article in Japanese, cloud gaming services company Ubitus, in which Nvidia has a stake, is looking for land in Japan near nuclear power plant for a new data center for generative AI. CEO Wesley Kuo said nuclear power plants are the most efficient, cheap and stable power for AI.[203]\n

On 1 November 2024, the Federal Energy Regulatory Commission (FERC) rejected an application submitted by Talen Energy for approval to supply some electricity from the nuclear power station Susquehanna to Amazon's data center.[204] \nAccording to the Commission Chairman Willie L. Phillips, it is a burden on the electricity grid as well as a significant cost shifting concern to households and other business sectors.[204]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[205] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[206] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[207] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[208]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[210] The developers may not be aware that the bias exists.[211] Bias can be introduced by the way training data is selected and by the way a model is deployed.[212][210] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[213] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[214] a problem called "sample size disparity".[215] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[216]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would re-offend.[217] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[219]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[220] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[221]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[222] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[215]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[209]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][224]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[225] This is especially true of deep neural networks, in which there is a large number of non-linear relationships between inputs and outputs. However, some popular explainability techniques exist.[226]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[227] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[228]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[229] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[230]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[231]\n

Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[232] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[233] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[234] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[235] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[236]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[238] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[238] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[239] By 2015, over fifty countries were reported to be researching battlefield robots.[240]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[241] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[242][243]\n

There are many other ways that AI is expected to help bad actors, some of which can not be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[244]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[245]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[246] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[247] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][249] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[245] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[250][251]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[252] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[253]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[254]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[255] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[257] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[258] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[259]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[260]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[261] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[262] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[263] He notably mentioned risks of an AI takeover,[264] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[265]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[266]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[267] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[268][269] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[270] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[271] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[272] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[273]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[274]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[275]\nThe field of machine ethics is also called computational morality,[275]\nand was founded at an AAAI symposium in 2005.[276]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[277] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[278]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[279] Google,[280] EleutherAI and Meta.[281] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[282][283] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[284] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[285]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute tests projects in four main areas:[286][287]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[288] however, these principles have not gone without criticism, especially with regard to the people chosen to contribute to these frameworks.[289]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[290]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under an MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[291]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[292] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[293] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[294][295] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[296] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[296] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[296] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[297] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[298] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[299] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[300]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[294] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[301] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[302][303]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[304] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[305][306] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[307][308]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[309][310] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[312] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[313][310]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[310]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[317] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[318] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[319] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[321] and ongoing pressure from the U.S. Congress to fund more productive projects.[322] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[323] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[324] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[325] and began to look into "sub-symbolic" approaches.[326] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][331] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[332] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[333]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[334] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[335]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[337] graphics processing units, cloud computing[338]) and access to large amounts of data[339] (including curated datasets,[338] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[296]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[273]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2016, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[340] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[341] About 800,000 "AI"-related U.S. job openings existed in 2022.[342]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[343] Another major focus has been whether machines can be conscious, and the associated ethical implications.[344] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[345] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[344]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[346] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[346] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[313] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[347]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[348]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[349] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[350]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[351] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[352] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[353] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[354] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[355]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[357] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[358]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[359] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[360] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[362][363] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[364] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[365][366] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[367] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[368] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[369]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[370]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[374]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[375] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[376][377] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[376] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[378]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[379] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[380][381]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[377][376]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[366] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[382]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[383]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[384]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[385]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[386] and have been a persistent theme in science fiction.[387]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[388]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[389] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[390]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[391]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[209]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[218]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[223]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[237]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[248]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[256]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[309][311]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[314] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[315]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[316]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[320]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[327] were championed by Hans Moravec[328] and Rodney Brooks[329] and went by many names: Nouvelle AI.[329] Developmental robotics.[330]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[336]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[338]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[356]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[361]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[371] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[372] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[373]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Kendall, Tyler (28 September 2024). "Nvidia\'s Huang Says Nuclear Power an Option to Feed Data Centers". Bloomberg.\n
  398. \n
  399. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  400. \n
  401. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  402. \n
  403. ^ a b Niva Yadav (19 August 2024). "Taiwan to stop large data centers in the North, cites insufficient power". DatacenterDynamics.\n
  404. \n
  405. ^ Mochizuki, Takashi; Oda, Shoko (18 October 2024). "エヌビディア出資の日本企業、原発近くでAIデータセンター新設検討". Bloomberg (in Japanese).\n
  406. \n
  407. ^ a b Naureen S Malik and Will Wade (5 November 2024). "Nuclear-Hungry AI Campuses Need New Plan to Find Power Fast". Bloomberg.\n
  408. \n
  409. ^ Nicas (2018).\n
  410. \n
  411. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  412. \n
  413. ^ Williams (2023).\n
  414. \n
  415. ^ Taylor & Hern (2023).\n
  416. \n
  417. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  418. \n
  419. ^ a b Rose (2023).\n
  420. \n
  421. ^ CNA (2019).\n
  422. \n
  423. ^ Goffrey (2008), p. 17.\n
  424. \n
  425. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  426. \n
  427. ^ Christian (2020), p. 25.\n
  428. \n
  429. ^ a b Russell & Norvig (2021), p. 995.\n
  430. \n
  431. ^ Grant & Hill (2023).\n
  432. \n
  433. ^ Larson & Angwin (2016).\n
  434. \n
  435. ^ Christian (2020), p. 67–70.\n
  436. \n
  437. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  438. \n
  439. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  440. \n
  441. ^ Quoted in Christian (2020, p. 65).\n
  442. \n
  443. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  444. \n
  445. ^ Quoted in Christian (2020, p. 80)\n
  446. \n
  447. ^ Dockrill (2022).\n
  448. \n
  449. ^ Sample (2017).\n
  450. \n
  451. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  452. \n
  453. ^ Christian (2020), p. 110.\n
  454. \n
  455. ^ Christian (2020), pp. 88–91.\n
  456. \n
  457. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  458. \n
  459. ^ Christian (2020), p. 91.\n
  460. \n
  461. ^ Christian (2020), p. 83.\n
  462. \n
  463. ^ Verma (2021).\n
  464. \n
  465. ^ Rothman (2020).\n
  466. \n
  467. ^ Christian (2020), pp. 105–108.\n
  468. \n
  469. ^ Christian (2020), pp. 108–112.\n
  470. \n
  471. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  472. \n
  473. ^ Russell & Norvig (2021), p. 989.\n
  474. \n
  475. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  476. \n
  477. ^ Russell & Norvig (2021), p. 988.\n
  478. \n
  479. ^ Robitzski (2018); Sainato (2015)\n
  480. \n
  481. ^ Harari (2018).\n
  482. \n
  483. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  484. \n
  485. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  486. \n
  487. ^ Urbina et al. (2022).\n
  488. \n
  489. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  490. \n
  491. ^ Ford & Colvin (2015); McGaughey (2022)\n
  492. \n
  493. ^ IGM Chicago (2017).\n
  494. \n
  495. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  496. \n
  497. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  498. \n
  499. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  500. \n
  501. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  502. \n
  503. ^ Morgenstern (2015).\n
  504. \n
  505. ^ Mahdawi (2017); Thompson (2014)\n
  506. \n
  507. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  508. \n
  509. ^ Cellan-Jones (2014).\n
  510. \n
  511. ^ Russell & Norvig 2021, p. 1001.\n
  512. \n
  513. ^ Bostrom (2014).\n
  514. \n
  515. ^ Russell (2019).\n
  516. \n
  517. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  518. \n
  519. ^ Harari (2023).\n
  520. \n
  521. ^ Müller & Bostrom (2014).\n
  522. \n
  523. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  524. \n
  525. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  526. \n
  527. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  528. \n
  529. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  530. \n
  531. ^ Valance (2023).\n
  532. \n
  533. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  534. \n
  535. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  536. \n
  537. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  538. \n
  539. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  540. \n
  541. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  542. \n
  543. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  544. \n
  545. ^ a b Christian (2020), pp. 67, 73.\n
  546. \n
  547. ^ Yudkowsky (2008).\n
  548. \n
  549. ^ a b Anderson & Anderson (2011).\n
  550. \n
  551. ^ AAAI (2014).\n
  552. \n
  553. ^ Wallach (2010).\n
  554. \n
  555. ^ Russell (2019), p. 173.\n
  556. \n
  557. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  560. \n
  561. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  566. \n
  567. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  568. \n
  569. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  570. \n
  571. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  574. \n
  575. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  576. \n
  577. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  578. \n
  579. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  580. \n
  581. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  582. \n
  583. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  584. \n\n
  585. ^ a b Vincent (2023).\n
  586. \n
  587. ^ Stanford University (2023).\n
  588. \n
  589. ^ a b c d UNESCO (2021).\n
  590. \n
  591. ^ Kissinger (2021).\n
  592. \n
  593. ^ Altman, Brockman & Sutskever (2023).\n
  594. \n
  595. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  596. \n
  597. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  598. \n
  599. ^ Edwards (2023).\n
  600. \n
  601. ^ Kasperowicz (2023).\n
  602. \n
  603. ^ Fox News (2023).\n
  604. \n
  605. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  606. \n
  607. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  608. \n
  609. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  610. \n
  611. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  612. \n
  613. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  614. \n
  615. ^ a b Russell & Norvig 2021, p. 9.\n
  616. \n
  617. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  618. \n
  619. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  620. \n
  621. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  622. \n
  623. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  624. \n
  625. ^ Crevier (1993), pp. 47–49.\n
  626. \n
  627. ^ Russell & Norvig (2003), p. 17.\n
  628. \n
  629. ^ Russell & Norvig (2003), p. 18.\n
  630. \n
  631. ^ Newquist (1994), p. 86.\n
  632. \n
  633. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  634. \n
  635. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  636. \n
  637. ^ Russell & Norvig (2021), p. 21.\n
  638. \n
  639. ^ Lighthill (1973).\n
  640. \n
  641. ^ NRC 1999, pp. 212–213.\n
  642. \n
  643. ^ Russell & Norvig (2021), p. 22.\n
  644. \n
  645. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  646. \n
  647. ^ Russell & Norvig (2021), p. 24.\n
  648. \n
  649. ^ Nilsson (1998), p. 7.\n
  650. \n
  651. ^ McCorduck (2004), pp. 454–462.\n
  652. \n
  653. ^ Moravec (1988).\n
  654. \n
  655. ^ a b Brooks (1990).\n
  656. \n
  657. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  658. \n
  659. ^ Russell & Norvig (2021), p. 25.\n
  660. \n
  661. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  662. \n
  663. ^ Russell & Norvig (2021), p. 26.\n
  664. \n
  665. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  666. \n
  667. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  668. \n
  669. ^ Wong (2023).\n
  670. \n
  671. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  672. \n
  673. ^ a b c Clark (2015b).\n
  674. \n
  675. ^ Big data: Russell & Norvig (2021, p. 26)\n
  676. \n
  677. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  678. \n
  679. ^ DiFeliciantonio (2023).\n
  680. \n
  681. ^ Goswami (2023).\n
  682. \n
  683. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  684. \n
  685. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  686. \n
  687. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  688. \n
  689. ^ a b Turing (1950), p. 1.\n
  690. \n
  691. ^ Turing (1950), Under "The Argument from Consciousness".\n
  692. \n
  693. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  694. \n
  695. ^ Russell & Norvig (2021), p. 3.\n
  696. \n
  697. ^ Maker (2006).\n
  698. \n
  699. ^ McCarthy (1999).\n
  700. \n
  701. ^ Minsky (1986).\n
  702. \n
  703. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  704. \n
  705. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  706. \n
  707. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  708. \n
  709. ^ Nilsson (1983), p. 10.\n
  710. \n
  711. ^ Haugeland (1985), pp. 112–117.\n
  712. \n
  713. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  714. \n
  715. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  716. \n
  717. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  718. \n
  719. ^ Crevier (1993), p. 125.\n
  720. \n
  721. ^ Langley (2011).\n
  722. \n
  723. ^ Katz (2012).\n
  724. \n
  725. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  726. \n
  727. ^ Pennachin & Goertzel (2007).\n
  728. \n
  729. ^ a b Roberts (2016).\n
  730. \n
  731. ^ Russell & Norvig (2021), p. 986.\n
  732. \n
  733. ^ Chalmers (1995).\n
  734. \n
  735. ^ Dennett (1991).\n
  736. \n
  737. ^ Horst (2005).\n
  738. \n
  739. ^ Searle (1999).\n
  740. \n
  741. ^ Searle (1980), p. 1.\n
  742. \n
  743. ^ Russell & Norvig (2021), p. 981.\n
  744. \n
  745. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  746. \n
  747. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  754. \n
  755. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  756. \n
  757. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  758. \n
  759. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  760. \n
  761. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  762. \n
  763. ^ Russell & Norvig (2021), p. 1005.\n
  764. \n
  765. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  766. \n
  767. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  768. \n
  769. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  770. \n
  771. ^ McCorduck (2004), pp. 340–400.\n
  772. \n
  773. ^ Buttazzo (2001).\n
  774. \n
  775. ^ Anderson (2008).\n
  776. \n
  777. ^ McCauley (2007).\n
  778. \n
  779. ^ Galvan (1997).\n
  780. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-07 16:58:14,544] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_pmcvYrq1RWJ2KPeaHf85Tvep', type='function')], function_call=None) -[2024-11-07 16:58:14,545] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 16:58:15,380] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, should use Bing search results to gather information on the history of artificial intelligence and its impact on society. They can look for articles, studies, and expert opinions to provide a comprehensive overview of the topic.'. -[2024-11-07 16:58:15,381] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_86o0aB6BCxSa7eXe5iDhbQ4O', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b"}', name='fetch_webpage'), id='call_fo1OlgUItVbiWPDw9N1BlzXO', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/14/ai-trends-to-watch-in-2022/?sh=3b4b4b7b6b6b"}', name='fetch_webpage'), id='call_Gv3Ss5qpWS7jLKpNXFJyUDod', type='function')], function_call=None) -Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_pmcvYrq1RWJ2KPeaHf85Tvep', type='function')], function_call=None) -[2024-11-07 16:58:15,659] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:58:15,660] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:58:15,660] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next agent, agent1, should use Bing search results to gather information on the history of artificial intelligence and its impact on society. They can look for articles, studies, and expert opinions to provide a comprehensive overview of the topic.'. -[2024-11-07 16:58:16,560] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.bing.com/search?q=history+of+artificial+intelligence+and+its+impact+on+society'}'. -[2024-11-07 16:58:16,562] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.bing.com/search?q=history+of+artificial+intelligence+and+its+impact+on+society', 'content': 'history of artificial intelligence and its impact on society - Search
  1. There are no results for history of artificial intelligence and its impact on society

    • Check your spelling or try different keywords

    Ref A: CE387C3877854C9D9CB6B43E1C34ECEC Ref B: CHI30EDGE0216 Ref C: 2024-11-07T22:58:16Z

'}'. -[2024-11-07 16:58:16,565] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=history+of+artificial+intelligence+and+its+impact+on+society"}', name='fetch_webpage'), id='call_kKIKiEaXPvFJoOz4uZq9DCG9', type='function')], function_call=None) -[2024-11-07 16:58:16,566] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:58:17,415] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent should use the information gathered from Wikipedia to provide a detailed overview of the history of artificial intelligence and its impact on society. They should focus on key milestones, advancements, and societal implications of artificial intelligence throughout history.'. -[2024-11-07 16:58:17,416] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_86o0aB6BCxSa7eXe5iDhbQ4O', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/16/top-ai-trends-to-watch-out-for-in-2022/?sh=3b4c4b7b6b6b"}', name='fetch_webpage'), id='call_fo1OlgUItVbiWPDw9N1BlzXO', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.forbes.com/sites/forbestechcouncil/2021/12/14/ai-trends-to-watch-in-2022/?sh=3b4b4b7b6b6b"}', name='fetch_webpage'), id='call_Gv3Ss5qpWS7jLKpNXFJyUDod', type='function')], function_call=None) -Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_pmcvYrq1RWJ2KPeaHf85Tvep', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=history+of+artificial+intelligence+and+its+impact+on+society"}', name='fetch_webpage'), id='call_kKIKiEaXPvFJoOz4uZq9DCG9', type='function')], function_call=None) -[2024-11-07 16:58:17,815] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:58:17,815] [ERROR] [Engine]: An error occurred during chain-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 369, in chain_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 448, in _summarize_results - result = json.dumps(result.content) -AttributeError: 'dict' object has no attribute 'content' -[2024-11-07 16:58:17,817] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:58:17,817] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:58:17,817] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:58:17,818] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 16:58:32,018] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 16:58:32,018] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 16:58:32,018] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 16:58:32,018] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 16:58:32,018] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 16:58:32,018] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. 
-[2024-11-07 16:58:32,018] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 16:58:32,018] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 16:58:32,018] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 16:58:32,018] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 16:58:32,018] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 16:58:32,018] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 16:58:32,018] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 16:58:32,018] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 16:58:32,018] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 16:58:32,018] [INFO] [Engine]: Engine initialized. -[2024-11-07 16:58:32,018] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 16:58:32,018] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 16:58:32,018] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 16:58:32,019] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 16:58:32,019] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 16:58:34,342] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 16:58:34,343] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 16:58:34,344] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -[2024-11-07 16:58:34,344] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 16:58:35,943] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent2' as the next agent with plan: 'The next agent, agent2, with a curious and ambitious profile, should use google to search for additional information on the top AI trends to watch out for in 2022. They can explore different sources and perspectives to gather comprehensive insights. The agent can focus on recent updates, emerging technologies, and potential impacts of these trends in various industries.'. -[2024-11-07 16:58:35,943] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -[2024-11-07 16:58:36,253] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:58:36,253] [INFO] [Engine]: Agent 'agent2' is executing task. -[2024-11-07 16:58:36,254] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next agent, agent2, with a curious and ambitious profile, should use google to search for additional information on the top AI trends to watch out for in 2022. They can explore different sources and perspectives to gather comprehensive insights. 
The agent can focus on recent updates, emerging technologies, and potential impacts of these trends in various industries.'. -[2024-11-07 16:58:37,559] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=top+AI+trends+2022'}'. -[2024-11-07 16:58:37,567] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=top+AI+trends+2022', 'content': 'top AI trends 2022 - Google Search

Accessibility Links

About 370,000,000 results (0.36 seconds) 

Search Results

AI Overview

Searching

From sources across the web

9 moreShow less
Feedback
People also ask
Feedback
People also search for
Feedback

Page Navigation

Google apps
'}'. -[2024-11-07 16:58:37,684] [INFO] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=top+AI+trends+2022"}', name='fetch_webpage'), id='call_1v99ilivRPOzme5EIlKrFF0O', type='function')], function_call=None) -[2024-11-07 16:58:37,684] [INFO] [BaseAgent]: Agent 'agent2' is planning the next step. -[2024-11-07 16:58:38,400] [INFO] [BaseAgent]: Agent 'agent2' selected 'agent3' as the next agent with plan: 'The next agent should research the top AI trends for 2022 on Wikipedia and summarize the key findings, including any notable developments or advancements in the field.'. -[2024-11-07 16:58:38,401] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=top+AI+trends+2022"}', name='fetch_webpage'), id='call_1v99ilivRPOzme5EIlKrFF0O', type='function')], function_call=None) -[2024-11-07 16:58:38,808] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 16:58:38,809] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 16:58:38,809] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent should research the top AI trends for 2022 on Wikipedia and summarize the key findings, including any notable developments or advancements in the field.'. -[2024-11-07 16:58:39,606] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. 
-[2024-11-07 16:58:39,619] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network's inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore's law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple's Face ID or Microsoft's DeepFace and Google's FaceNet) and image labeling (used by Facebook, Apple's iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson's disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson's disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I'm not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligent (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172]\n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI's ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of 'what they know' to the question of 'what they're doing with it'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 million (US).[198] Nvidia CEO Jen-Hsun Huang said nuclear power is a good option for the data centers.[199]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[200] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[201]\n

After the last approval in September 2024, Taiwan suspended the approval of data centers north of Taoyuan with a capacity of more than 5 MW, due to power supply shortages.[202] On the other hand, Singapore imposed a ban on the opening of data centers in 2019 due to electric power, but in 2022, lifted this ban.[202]\n

Although most nuclear plants in Japan have been shut down after the 2011 Fukushima nuclear accident, according to an October 2024 Bloomberg article in Japanese, cloud gaming services company Ubitus, in which Nvidia has a stake, is looking for land in Japan near nuclear power plant for a new data center for generative AI. CEO Wesley Kuo said nuclear power plants are the most efficient, cheap and stable power for AI.[203]\n

On 1 November 2024, the Federal Energy Regulatory Commission (FERC) rejected an application submitted by Talen Energy for approval to supply some electricity from the nuclear power station Susquehanna to Amazon's data center.[204] \nAccording to the Commission Chairman Willie L. Phillips, it is a burden on the electricity grid as well as a significant cost shifting concern to households and other business sectors.[204]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[205] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[206] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[207] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[208]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[210] The developers may not be aware that the bias exists.[211] Bias can be introduced by the way training data is selected and by the way a model is deployed.[212][210] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[213] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos's new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[214] a problem called "sample size disparity".[215] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[216]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[217] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[219]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[220] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn't work."[221]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[222] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[215]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[209]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[224]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[225] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[226]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[227] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[228]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[229] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[230]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[231]\n

Several approaches aim to address the transparency problem. SHAP enables visualisation of the contribution of each feature to the output.[232] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[233] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[234] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[235] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[236]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[238] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[238] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[239] By 2015, over fifty countries were reported to be researching battlefield robots.[240]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[241] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[242][243]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[244]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[245]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[246] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[247] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][249] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[245] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[250][251]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[252] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[253]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[254]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[255] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[257] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[258] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[259]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[260]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[261] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[262] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[263] He notably mentioned risks of an AI takeover,[264] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[265]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[266]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[267] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[268][269] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[270] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[271] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[272] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[273]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[274]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[275]\nThe field of machine ethics is also called computational morality,[275]\nand was founded at an AAAI symposium in 2005.[276]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[277] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[278]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[279] Google,[280] EleutherAI and Meta.[281] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[282][283] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[284] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[285]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[286][287]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[288] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[289]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[290]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under an MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[291]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[292] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[293] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[294][295] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[296] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[296] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[296] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[297] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[298] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[299] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[300]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[294] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[301] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[302][303]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[304] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[305][306] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[307][308]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[309][310] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[312] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[313][310]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[310]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[317] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[318] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[319] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[321] and ongoing pressure from the U.S. Congress to fund more productive projects.[322] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[323] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[324] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[325] and began to look into "sub-symbolic" approaches.[326] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][331] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[332] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[333]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[334] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[335]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[337] graphics processing units, cloud computing[338]) and access to large amounts of data[339] (including curated datasets,[338] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[296]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[273]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2016, AlphaGo, developed by DeepMind, beat the world champion Go player Lee Sedol. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[340] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[341] About 800,000 "AI"-related U.S. job openings existed in 2022.[342]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[343] Another major focus has been whether machines can be conscious, and the associated ethical implications.[344] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[345] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[344]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[346] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[346] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[313] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[347]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[348]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[349] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[350]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[351] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[352] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[353] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested in practice, that the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[354] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[355]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[357] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[358]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[359] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[360] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[362][363] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[364] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[365][366] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[367] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[368] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[369]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[370]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[374]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[375] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[376][377] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[376] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[378]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[379] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[380][381]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[377][376]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[366] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[382]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[383]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[384]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[385]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[386] and have been a persistent theme in science fiction.[387]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[388]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[389] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[390]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[391]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[209]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[218]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[223]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[237]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[248]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[256]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[309][311]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[314] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[315]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[316]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[320]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[327] were championed by Hans Moravec[328] and Rodney Brooks[329] and went by many names: Nouvelle AI.[329] Developmental robotics.[330]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[336]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[338]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[356]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[361]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[371] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[372] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[373]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming YS Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Kendall, Tyler (28 September 2024). "Nvidia\'s Huang Says Nuclear Power an Option to Feed Data Centers". Bloomberg.\n
  398. \n
  399. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  400. \n
  401. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  402. \n
  403. ^ a b Niva Yadav (19 August 2024). "Taiwan to stop large data centers in the North, cites insufficient power". DatacenterDynamics.\n
  404. \n
  405. ^ Mochizuki, Takashi; Oda, Shoko (18 October 2024). "エヌビディア出資の日本企業、原発近くでAIデータセンター新設検討". Bloomberg (in Japanese).\n
  406. \n
  407. ^ a b Naureen S Malik and Will Wade (5 November 2024). "Nuclear-Hungry AI Campuses Need New Plan to Find Power Fast". Bloomberg.\n
  408. \n
  409. ^ Nicas (2018).\n
  410. \n
  411. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  412. \n
  413. ^ Williams (2023).\n
  414. \n
  415. ^ Taylor & Hern (2023).\n
  416. \n
  417. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  418. \n
  419. ^ a b Rose (2023).\n
  420. \n
  421. ^ CNA (2019).\n
  422. \n
  423. ^ Goffrey (2008), p. 17.\n
  424. \n
  425. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  426. \n
  427. ^ Christian (2020), p. 25.\n
  428. \n
  429. ^ a b Russell & Norvig (2021), p. 995.\n
  430. \n
  431. ^ Grant & Hill (2023).\n
  432. \n
  433. ^ Larson & Angwin (2016).\n
  434. \n
  435. ^ Christian (2020), p. 67–70.\n
  436. \n
  437. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  438. \n
  439. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  440. \n
  441. ^ Quoted in Christian (2020, p. 65).\n
  442. \n
  443. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  444. \n
  445. ^ Quoted in Christian (2020, p. 80)\n
  446. \n
  447. ^ Dockrill (2022).\n
  448. \n
  449. ^ Sample (2017).\n
  450. \n
  451. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  452. \n
  453. ^ Christian (2020), p. 110.\n
  454. \n
  455. ^ Christian (2020), pp. 88–91.\n
  456. \n
  457. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  458. \n
  459. ^ Christian (2020), p. 91.\n
  460. \n
  461. ^ Christian (2020), p. 83.\n
  462. \n
  463. ^ Verma (2021).\n
  464. \n
  465. ^ Rothman (2020).\n
  466. \n
  467. ^ Christian (2020), pp. 105–108.\n
  468. \n
  469. ^ Christian (2020), pp. 108–112.\n
  470. \n
  471. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  472. \n
  473. ^ Russell & Norvig (2021), p. 989.\n
  474. \n
  475. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  476. \n
  477. ^ Russell & Norvig (2021), p. 988.\n
  478. \n
  479. ^ Robitzski (2018); Sainato (2015)\n
  480. \n
  481. ^ Harari (2018).\n
  482. \n
  483. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  484. \n
  485. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  486. \n
  487. ^ Urbina et al. (2022).\n
  488. \n
  489. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  490. \n
  491. ^ Ford & Colvin (2015);McGaughey (2022)\n
  492. \n
  493. ^ IGM Chicago (2017).\n
  494. \n
  495. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  496. \n
  497. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  498. \n
  499. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  500. \n
  501. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  502. \n
  503. ^ Morgenstern (2015).\n
  504. \n
  505. ^ Mahdawi (2017); Thompson (2014)\n
  506. \n
  507. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  508. \n
  509. ^ Cellan-Jones (2014).\n
  510. \n
  511. ^ Russell & Norvig 2021, p. 1001.\n
  512. \n
  513. ^ Bostrom (2014).\n
  514. \n
  515. ^ Russell (2019).\n
  516. \n
  517. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  518. \n
  519. ^ Harari (2023).\n
  520. \n
  521. ^ Müller & Bostrom (2014).\n
  522. \n
  523. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  524. \n
  525. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  526. \n
  527. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  528. \n
  529. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  530. \n
  531. ^ Valance (2023).\n
  532. \n
  533. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  534. \n
  535. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  536. \n
  537. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  538. \n
  539. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  540. \n
  541. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  542. \n
  543. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  544. \n
  545. ^ a b Christian (2020), pp. 67, 73.\n
  546. \n
  547. ^ Yudkowsky (2008).\n
  548. \n
  549. ^ a b Anderson & Anderson (2011).\n
  550. \n
  551. ^ AAAI (2014).\n
  552. \n
  553. ^ Wallach (2010).\n
  554. \n
  555. ^ Russell (2019), p. 173.\n
  556. \n
  557. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  560. \n
  561. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  566. \n
  567. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  568. \n
  569. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  570. \n
  571. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  574. \n
  575. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  576. \n
  577. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  578. \n
  579. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  580. \n
  581. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  582. \n
  583. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  584. \n\n
  585. ^ a b Vincent (2023).\n
  586. \n
  587. ^ Stanford University (2023).\n
  588. \n
  589. ^ a b c d UNESCO (2021).\n
  590. \n
  591. ^ Kissinger (2021).\n
  592. \n
  593. ^ Altman, Brockman & Sutskever (2023).\n
  594. \n
  595. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  596. \n
  597. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  598. \n
  599. ^ Edwards (2023).\n
  600. \n
  601. ^ Kasperowicz (2023).\n
  602. \n
  603. ^ Fox News (2023).\n
  604. \n
  605. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  606. \n
  607. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  608. \n
  609. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  610. \n
  611. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  612. \n
  613. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  614. \n
  615. ^ a b Russell & Norvig 2021, p. 9.\n
  616. \n
  617. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  618. \n
  619. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  620. \n
  621. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  622. \n
  623. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  624. \n
  625. ^ Crevier (1993), pp. 47–49.\n
  626. \n
  627. ^ Russell & Norvig (2003), p. 17.\n
  628. \n
  629. ^ Russell & Norvig (2003), p. 18.\n
  630. \n
  631. ^ Newquist (1994), pp. 86–86.\n
  632. \n
  633. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  634. \n
  635. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  636. \n
  637. ^ Russell & Norvig (2021), p. 21.\n
  638. \n
  639. ^ Lighthill (1973).\n
  640. \n
  641. ^ NRC 1999, pp. 212–213.\n
  642. \n
  643. ^ Russell & Norvig (2021), p. 22.\n
  644. \n
  645. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  646. \n
  647. ^ Russell & Norvig (2021), p. 24.\n
  648. \n
  649. ^ Nilsson (1998), p. 7.\n
  650. \n
  651. ^ McCorduck (2004), pp. 454–462.\n
  652. \n
  653. ^ Moravec (1988).\n
  654. \n
  655. ^ a b Brooks (1990).\n
  656. \n
  657. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  658. \n
  659. ^ Russell & Norvig (2021), p. 25.\n
  660. \n
  661. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  662. \n
  663. ^ Russell & Norvig (2021), p. 26.\n
  664. \n
  665. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  666. \n
  667. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  668. \n
  669. ^ Wong (2023).\n
  670. \n
  671. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  672. \n
  673. ^ a b c Clark (2015b).\n
  674. \n
  675. ^ Big data: Russell & Norvig (2021, p. 26)\n
  676. \n
  677. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  678. \n
  679. ^ DiFeliciantonio (2023).\n
  680. \n
  681. ^ Goswami (2023).\n
  682. \n
  683. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  684. \n
  685. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  686. \n
  687. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  688. \n
  689. ^ a b Turing (1950), p. 1.\n
  690. \n
  691. ^ Turing (1950), Under "The Argument from Consciousness".\n
  692. \n
  693. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  694. \n
  695. ^ Russell & Norvig (2021), p. 3.\n
  696. \n
  697. ^ Maker (2006).\n
  698. \n
  699. ^ McCarthy (1999).\n
  700. \n
  701. ^ Minsky (1986).\n
  702. \n
  703. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  704. \n
  705. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  706. \n
  707. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  708. \n
  709. ^ Nilsson (1983), p. 10.\n
  710. \n
  711. ^ Haugeland (1985), pp. 112–117.\n
  712. \n
  713. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  714. \n
  715. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  716. \n
  717. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  718. \n
  719. ^ Crevier (1993), p. 125.\n
  720. \n
  721. ^ Langley (2011).\n
  722. \n
  723. ^ Katz (2012).\n
  724. \n
  725. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  726. \n
  727. ^ Pennachin & Goertzel (2007).\n
  728. \n
  729. ^ a b Roberts (2016).\n
  730. \n
  731. ^ Russell & Norvig (2021), p. 986.\n
  732. \n
  733. ^ Chalmers (1995).\n
  734. \n
  735. ^ Dennett (1991).\n
  736. \n
  737. ^ Horst (2005).\n
  738. \n
  739. ^ Searle (1999).\n
  740. \n
  741. ^ Searle (1980), p. 1.\n
  742. \n
  743. ^ Russell & Norvig (2021), p. 9817.\n
  744. \n
  745. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  746. \n
  747. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  754. \n
  755. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  756. \n
  757. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  758. \n
  759. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  760. \n
  761. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  762. \n
  763. ^ Russell & Norvig (2021), p. 1005.\n
  764. \n
  765. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  766. \n
  767. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  768. \n
  769. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  770. \n
  771. ^ McCorduck (2004), pp. 340–400.\n
  772. \n
  773. ^ Buttazzo (2001).\n
  774. \n
  775. ^ Anderson (2008).\n
  776. \n
  777. ^ McCauley (2007).\n
  778. \n
  779. ^ Galvan (1997).\n
  780. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-07 16:58:39,846] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TXKgHrvodIn5Zs2pMGew3MHQ', type='function')], function_call=None) -[2024-11-07 16:58:39,850] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 16:58:40,755] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, who is a helpful and supportive team leader, should use Bing to search for information on artificial intelligence. The agent can gather additional insights, examples, and perspectives from the search results to enrich the team's understanding of the topic.'. -[2024-11-07 16:58:40,755] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_Jewo5kQHSereVQ9nxjlwDSGX', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=top+AI+trends+2022"}', name='fetch_webpage'), id='call_1v99ilivRPOzme5EIlKrFF0O', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TXKgHrvodIn5Zs2pMGew3MHQ', type='function')], function_call=None) -[2024-11-07 16:58:41,163] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-07 16:58:41,164] [INFO] [Engine]: EnginePlanner decided to terminate the 
simulation. -[2024-11-07 16:58:41,164] [ERROR] [Engine]: An error occurred during chain-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 369, in chain_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 449, in _summarize_results - result = json.dumps(result.content) -AttributeError: 'dict' object has no attribute 'content' -[2024-11-07 16:58:41,167] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 16:58:41,168] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 16:58:41,168] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 16:58:41,168] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 17:01:52,115] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 17:01:52,115] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 17:01:52,116] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 17:01:52,116] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 17:01:52,116] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 17:01:52,116] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 17:01:52,116] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 17:01:52,116] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 17:01:52,116] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. 
-[2024-11-07 17:01:52,116] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 17:01:52,116] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 17:01:52,116] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 17:01:52,116] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 17:01:52,116] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 17:01:52,116] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 17:01:52,116] [INFO] [Engine]: Engine initialized. -[2024-11-07 17:01:52,116] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 17:01:52,116] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 17:01:52,116] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 17:01:52,116] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 17:01:52,116] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 17:01:54,452] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:01:54,452] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:01:54,453] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_zkU9afMLTWUiawieF91h4qga', type='function')], function_call=None) -[2024-11-07 17:01:54,453] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. 
-[2024-11-07 17:01:55,827] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next planning task is for agent3 to utilize Wikipedia to research and gather information on the top AI trends to watch out for in 2022. Agent3 should focus on summarizing the key trends, providing insights, and ensuring the information is accurate and reliable.'. -[2024-11-07 17:01:55,827] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_zkU9afMLTWUiawieF91h4qga', type='function')], function_call=None) -[2024-11-07 17:01:56,274] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:01:56,275] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 17:01:56,275] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next planning task is for agent3 to utilize Wikipedia to research and gather information on the top AI trends to watch out for in 2022. Agent3 should focus on summarizing the key trends, providing insights, and ensuring the information is accurate and reliable.'. -[2024-11-07 17:01:56,966] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-07 17:01:56,979] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198] Nvidia CEO Jen-Hsun Huang said nuclear power is a good option for the data centers.[199]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[200] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[201]\n

After the last approval in September 2024, Taiwan suspended the approval of data centers north of Taoyuan with a capacity of more than 5 MW, due to power supply shortages.[202] On the other hand, Singapore imposed a ban on the opening of data centers in 2019 due to electric power, but in 2022, lifted this ban.[202]\n

Although most nuclear plants in Japan have been shut down after the 2011 Fukushima nuclear accident, according to an October 2024 Bloomberg article in Japanese, cloud gaming services company Ubitus, in which Nvidia has a stake, is looking for land in Japan near nuclear power plant for a new data center for generative AI. CEO Wesley Kuo said nuclear power plants are the most efficient, cheap and stable power for AI.[203]\n

On 1 November 2024, the Federal Energy Regulatory Commission (FERC) rejected an application submitted by Talen Energy for approval to supply some electricity from the nuclear power station Susquehanna to Amazon\'s data center.[204] \nAccording to the Commission Chairman Willie L. Phillips, it is a burden on the electricity grid as well as a significant cost shifting concern to households and other business sectors.[204]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[205] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[206] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[207] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[208]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[210] The developers may not be aware that the bias exists.[211] Bias can be introduced by the way training data is selected and by the way a model is deployed.[212][210] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[213] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[214] a problem called "sample size disparity".[215] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[216]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would re-offend.[217] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[219]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[220] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[221]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[222] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[215]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[209]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubiousdiscuss][224]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[225] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[226]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[227] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[228]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[229] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[230]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[231]\n

Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[232] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[233] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[234] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[235] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[236]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[238] Even when used in conventional warfare, it is likely that they will be unable to reliably choose targets and could potentially kill an innocent person.[238] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[239] By 2015, over fifty countries were reported to be researching battlefield robots.[240]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[241] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[242][243]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[244]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[245]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[246] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[247] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][249] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[245] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[250][251]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[252] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[253]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[254]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[255] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[257] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[258] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[259]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[260]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[261] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[262] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[263] He notably mentioned risks of an AI takeover,[264] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[265]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[266]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[267] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[268][269] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[270] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[271] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[272] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[273]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[274]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[275]\nThe field of machine ethics is also called computational morality,[275]\nand was founded at an AAAI symposium in 2005.[276]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[277] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[278]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[279] Google,[280] EleutherAI and Meta.[281] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[282][283] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[284] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[285]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework—containing the SUM values, developed by the Alan Turing Institute—tests projects in four main areas:[286][287]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[288] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[289]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[290]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[291]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[292] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[293] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[294][295] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[296] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[296] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[296] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[297] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[298] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[299] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[300]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[294] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[301] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[302][303]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[304] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[305][306] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[307][308]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[309][310] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[312] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[313][310]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[310]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[317] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[318] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[319] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[321] and ongoing pressure from the U.S. Congress to fund more productive projects.[322] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[323] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[324] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[325] and began to look into "sub-symbolic" approaches.[326] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lofti Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][331] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[332] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[333]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[334] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[335]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[337] graphics processing units, cloud computing[338]) and access to large amounts of data[339] (including curated datasets,[338] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[296]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[273]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[340] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[341] About 800,000 "AI"-related U.S. job openings existed in 2022.[342]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[343] Another major focus has been whether machines can be conscious, and the associated ethical implications.[344] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[345] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[344]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[346] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[346] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[313] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[347]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[348]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[349] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[350]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[351] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[352] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[353] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[354] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[355]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[357] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[358]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[359] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[360] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[362][363] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[364] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[365][366] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[367] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[368] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[369]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[370]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[374]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[375] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[376][377] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[376] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[378]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[379] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[380][381]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[377][376]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[366] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[382]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[383]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[384]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[385]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[386] and have been a persistent theme in science fiction.[387]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[388]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[389] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[390]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[391]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[209]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[218]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[223]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[237]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[248]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[256]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[309][311]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[314] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[315]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[316]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[320]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[327] were championed by Hans Moravec[328] and Rodney Brooks[329] and went by many names: Nouvelle AI.[329] Developmental robotics.[330]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[336]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[338]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[356]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[361]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[371] Searle's original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[372] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[373]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Kendall, Tyler (28 September 2024). "Nvidia\'s Huang Says Nuclear Power an Option to Feed Data Centers". Bloomberg.\n
  398. \n
  399. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  400. \n
  401. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  402. \n
  403. ^ a b Niva Yadav (19 August 2024). "Taiwan to stop large data centers in the North, cites insufficient power". DatacenterDynamics.\n
  404. \n
  405. ^ Mochizuki, Takashi; Oda, Shoko (18 October 2024). "エヌビディア出資の日本企業、原発近くでAIデータセンター新設検討". Bloomberg (in Japanese).\n
  406. \n
  407. ^ a b Naureen S Malik and Will Wade (5 November 2024). "Nuclear-Hungry AI Campuses Need New Plan to Find Power Fast". Bloomberg.\n
  408. \n
  409. ^ Nicas (2018).\n
  410. \n
  411. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  412. \n
  413. ^ Williams (2023).\n
  414. \n
  415. ^ Taylor & Hern (2023).\n
  416. \n
  417. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  418. \n
  419. ^ a b Rose (2023).\n
  420. \n
  421. ^ CNA (2019).\n
  422. \n
  423. ^ Goffrey (2008), p. 17.\n
  424. \n
  425. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  426. \n
  427. ^ Christian (2020), p. 25.\n
  428. \n
  429. ^ a b Russell & Norvig (2021), p. 995.\n
  430. \n
  431. ^ Grant & Hill (2023).\n
  432. \n
  433. ^ Larson & Angwin (2016).\n
  434. \n
  435. ^ Christian (2020), p. 67–70.\n
  436. \n
  437. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  438. \n
  439. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  440. \n
  441. ^ Quoted in Christian (2020, p. 65).\n
  442. \n
  443. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  444. \n
  445. ^ Quoted in Christian (2020, p. 80)\n
  446. \n
  447. ^ Dockrill (2022).\n
  448. \n
  449. ^ Sample (2017).\n
  450. \n
  451. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  452. \n
  453. ^ Christian (2020), p. 110.\n
  454. \n
  455. ^ Christian (2020), pp. 88–91.\n
  456. \n
  457. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  458. \n
  459. ^ Christian (2020), p. 91.\n
  460. \n
  461. ^ Christian (2020), p. 83.\n
  462. \n
  463. ^ Verma (2021).\n
  464. \n
  465. ^ Rothman (2020).\n
  466. \n
  467. ^ Christian (2020), pp. 105–108.\n
  468. \n
  469. ^ Christian (2020), pp. 108–112.\n
  470. \n
  471. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  472. \n
  473. ^ Russell & Norvig (2021), p. 989.\n
  474. \n
  475. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  476. \n
  477. ^ Russell & Norvig (2021), p. 988.\n
  478. \n
  479. ^ Robitzski (2018); Sainato (2015)\n
  480. \n
  481. ^ Harari (2018).\n
  482. \n
  483. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  484. \n
  485. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  486. \n
  487. ^ Urbina et al. (2022).\n
  488. \n
  489. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  490. \n
  491. ^ Ford & Colvin (2015);McGaughey (2022)\n
  492. \n
  493. ^ IGM Chicago (2017).\n
  494. \n
  495. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  496. \n
  497. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  498. \n
  499. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  500. \n
  501. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  502. \n
  503. ^ Morgenstern (2015).\n
  504. \n
  505. ^ Mahdawi (2017); Thompson (2014)\n
  506. \n
  507. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  508. \n
  509. ^ Cellan-Jones (2014).\n
  510. \n
  511. ^ Russell & Norvig 2021, p. 1001.\n
  512. \n
  513. ^ Bostrom (2014).\n
  514. \n
  515. ^ Russell (2019).\n
  516. \n
  517. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  518. \n
  519. ^ Harari (2023).\n
  520. \n
  521. ^ Müller & Bostrom (2014).\n
  522. \n
  523. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  524. \n
  525. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  526. \n
  527. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  528. \n
  529. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  530. \n
  531. ^ Valance (2023).\n
  532. \n
  533. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  534. \n
  535. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  536. \n
  537. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  538. \n
  539. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  540. \n
  541. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  542. \n
  543. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  544. \n
  545. ^ a b Christian (2020), pp. 67, 73.\n
  546. \n
  547. ^ Yudkowsky (2008).\n
  548. \n
  549. ^ a b Anderson & Anderson (2011).\n
  550. \n
  551. ^ AAAI (2014).\n
  552. \n
  553. ^ Wallach (2010).\n
  554. \n
  555. ^ Russell (2019), p. 173.\n
  556. \n
  557. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  560. \n
  561. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  566. \n
  567. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  568. \n
  569. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  570. \n
  571. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  574. \n
  575. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  576. \n
  577. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  578. \n
  579. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  580. \n
  581. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  582. \n
  583. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  584. \n\n
  585. ^ a b Vincent (2023).\n
  586. \n
  587. ^ Stanford University (2023).\n
  588. \n
  589. ^ a b c d UNESCO (2021).\n
  590. \n
  591. ^ Kissinger (2021).\n
  592. \n
  593. ^ Altman, Brockman & Sutskever (2023).\n
  594. \n
  595. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  596. \n
  597. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  598. \n
  599. ^ Edwards (2023).\n
  600. \n
  601. ^ Kasperowicz (2023).\n
  602. \n
  603. ^ Fox News (2023).\n
  604. \n
  605. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  606. \n
  607. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  608. \n
  609. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  610. \n
  611. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  612. \n
  613. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  614. \n
  615. ^ a b Russell & Norvig 2021, p. 9.\n
  616. \n
  617. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  618. \n
  619. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  620. \n
  621. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  622. \n
  623. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  624. \n
  625. ^ Crevier (1993), pp. 47–49.\n
  626. \n
  627. ^ Russell & Norvig (2003), p. 17.\n
  628. \n
  629. ^ Russell & Norvig (2003), p. 18.\n
  630. \n
  631. ^ Newquist (1994), pp. 86–86.\n
  632. \n
  633. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  634. \n
  635. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  636. \n
  637. ^ Russell & Norvig (2021), p. 21.\n
  638. \n
  639. ^ Lighthill (1973).\n
  640. \n
  641. ^ NRC 1999, pp. 212–213.\n
  642. \n
  643. ^ Russell & Norvig (2021), p. 22.\n
  644. \n
  645. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  646. \n
  647. ^ Russell & Norvig (2021), p. 24.\n
  648. \n
  649. ^ Nilsson (1998), p. 7.\n
  650. \n
  651. ^ McCorduck (2004), pp. 454–462.\n
  652. \n
  653. ^ Moravec (1988).\n
  654. \n
  655. ^ a b Brooks (1990).\n
  656. \n
  657. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  658. \n
  659. ^ Russell & Norvig (2021), p. 25.\n
  660. \n
  661. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  662. \n
  663. ^ Russell & Norvig (2021), p. 26.\n
  664. \n
  665. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  666. \n
  667. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  668. \n
  669. ^ Wong (2023).\n
  670. \n
  671. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  672. \n
  673. ^ a b c Clark (2015b).\n
  674. \n
  675. ^ Big data: Russell & Norvig (2021, p. 26)\n
  676. \n
  677. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  678. \n
  679. ^ DiFeliciantonio (2023).\n
  680. \n
  681. ^ Goswami (2023).\n
  682. \n
  683. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  684. \n
  685. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  686. \n
  687. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  688. \n
  689. ^ a b Turing (1950), p. 1.\n
  690. \n
  691. ^ Turing (1950), Under "The Argument from Consciousness".\n
  692. \n
  693. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  694. \n
  695. ^ Russell & Norvig (2021), p. 3.\n
  696. \n
  697. ^ Maker (2006).\n
  698. \n
  699. ^ McCarthy (1999).\n
  700. \n
  701. ^ Minsky (1986).\n
  702. \n
  703. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  704. \n
  705. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  706. \n
  707. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  708. \n
  709. ^ Nilsson (1983), p. 10.\n
  710. \n
  711. ^ Haugeland (1985), pp. 112–117.\n
  712. \n
  713. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  714. \n
  715. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  716. \n
  717. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  718. \n
  719. ^ Crevier (1993), p. 125.\n
  720. \n
  721. ^ Langley (2011).\n
  722. \n
  723. ^ Katz (2012).\n
  724. \n
  725. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  726. \n
  727. ^ Pennachin & Goertzel (2007).\n
  728. \n
  729. ^ a b Roberts (2016).\n
  730. \n
  731. ^ Russell & Norvig (2021), p. 986.\n
  732. \n
  733. ^ Chalmers (1995).\n
  734. \n
  735. ^ Dennett (1991).\n
  736. \n
  737. ^ Horst (2005).\n
  738. \n
  739. ^ Searle (1999).\n
  740. \n
  741. ^ Searle (1980), p. 1.\n
  742. \n
  743. ^ Russell & Norvig (2021), p. 9817.\n
  744. \n
  745. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  746. \n
  747. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  754. \n
  755. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  756. \n
  757. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  758. \n
  759. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  760. \n
  761. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  762. \n
  763. ^ Russell & Norvig (2021), p. 1005.\n
  764. \n
  765. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  766. \n
  767. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  768. \n
  769. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  770. \n
  771. ^ McCorduck (2004), pp. 340–400.\n
  772. \n
  773. ^ Buttazzo (2001).\n
  774. \n
  775. ^ Anderson (2008).\n
  776. \n
  777. ^ McCauley (2007).\n
  778. \n
  779. ^ Galvan (1997).\n
  780. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-07 17:01:57,249] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5oxiSal2FMrDmdlVq37V8Snm', type='function')], function_call=None) -[2024-11-07 17:01:57,250] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 17:01:58,127] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent should use Bing to search for information on artificial intelligence, focusing on recent developments, applications, and future trends in the field. The agent can also explore any emerging technologies or key players in the artificial intelligence industry.'. -[2024-11-07 17:01:58,128] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_zkU9afMLTWUiawieF91h4qga', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5oxiSal2FMrDmdlVq37V8Snm', type='function')], function_call=None) -[2024-11-07 17:01:58,526] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:01:58,527] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 17:01:58,527] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next agent should use Bing to search for information on artificial intelligence, focusing on recent developments, applications, and future trends in the field. 
The agent can also explore any emerging technologies or key players in the artificial intelligence industry.'. -[2024-11-07 17:01:59,749] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence'}'. -[2024-11-07 17:01:59,751] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence', 'content': 'recent developments applications future trends in artificial intelligence - Search
  1. There are no results for recent developments applications future trends in artificial intelligence

    • Check your spelling or try different keywords

    Ref A: D7F6E90353AA45CD96661F69F0565726 Ref B: CH1EDGE1109 Ref C: 2024-11-07T23:01:59Z

'}'. -[2024-11-07 17:01:59,770] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence"}', name='fetch_webpage'), id='call_kGqb87ZooW4yfhKdvQZKUOVg', type='function')], function_call=None) -[2024-11-07 17:01:59,770] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 17:02:00,578] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent should use the information gathered from Wikipedia to create a detailed summary of recent developments, applications, and future trends in artificial intelligence. This summary should include key advancements, practical uses, and potential future directions in the field of artificial intelligence.'. -[2024-11-07 17:02:00,579] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_zkU9afMLTWUiawieF91h4qga', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5oxiSal2FMrDmdlVq37V8Snm', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence"}', name='fetch_webpage'), id='call_kGqb87ZooW4yfhKdvQZKUOVg', type='function')], function_call=None) -[2024-11-07 17:02:01,188] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:02:01,189] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 17:02:01,189] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent should use the information gathered from Wikipedia to create a detailed summary of recent developments, applications, and future trends in artificial intelligence. This summary should include key advancements, practical uses, and potential future directions in the field of artificial intelligence.'. -[2024-11-07 17:02:01,801] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-07 17:02:01,802] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence', 'content': 'recent developments applications future trends in artificial intelligence - Search
  1. There are no results for recent developments applications future trends in artificial intelligence

    • Check your spelling or try different keywords

    Ref A: D7F6E90353AA45CD96661F69F0565726 Ref B: CH1EDGE1109 Ref C: 2024-11-07T23:01:59Z

'}'. -[2024-11-07 17:02:01,805] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_ok6PTjqE3J8k7vDsG6qM59gc', type='function')], function_call=None) -[2024-11-07 17:02:01,805] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 17:02:02,726] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, should use Bing search results to gather information about the history and development of artificial intelligence. They can focus on key milestones, significant figures, and major breakthroughs in the field.'. -[2024-11-07 17:02:02,727] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_zkU9afMLTWUiawieF91h4qga', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5oxiSal2FMrDmdlVq37V8Snm', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence"}', name='fetch_webpage'), id='call_kGqb87ZooW4yfhKdvQZKUOVg', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', 
name='fetch_webpage'), id='call_ok6PTjqE3J8k7vDsG6qM59gc', type='function')], function_call=None) -[2024-11-07 17:02:03,348] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:02:03,348] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 17:02:03,349] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next agent, agent1, should use Bing search results to gather information about the history and development of artificial intelligence. They can focus on key milestones, significant figures, and major breakthroughs in the field.'. -[2024-11-07 17:02:04,115] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.bing.com/search?q=history+and+development+of+artificial+intelligence'}'. -[2024-11-07 17:02:04,116] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.bing.com/search?q=history+and+development+of+artificial+intelligence', 'content': 'history and development of artificial intelligence - Search
  1. There are no results for history and development of artificial intelligence

    • Check your spelling or try different keywords

    Ref A: C9010BCDFB43424B98E62F36F25C6D87 Ref B: CHI30EDGE0107 Ref C: 2024-11-07T23:02:04Z

'}'. -[2024-11-07 17:02:04,134] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=history+and+development+of+artificial+intelligence"}', name='fetch_webpage'), id='call_blekHBznTXNcwDv5NDZBmOua', type='function')], function_call=None) -[2024-11-07 17:02:04,135] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 17:02:05,285] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, should use Wikipedia to gather information on the history and development of artificial intelligence. They can provide a comprehensive overview of key milestones, breakthroughs, and influential figures in the field. The agent should focus on summarizing the information in a clear and organized manner to contribute to our research on this topic.'. -[2024-11-07 17:02:05,286] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_zkU9afMLTWUiawieF91h4qga', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5oxiSal2FMrDmdlVq37V8Snm', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=recent+developments+applications+future+trends+in+artificial+intelligence"}', name='fetch_webpage'), id='call_kGqb87ZooW4yfhKdvQZKUOVg', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_ok6PTjqE3J8k7vDsG6qM59gc', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.bing.com/search?q=history+and+development+of+artificial+intelligence"}', name='fetch_webpage'), id='call_blekHBznTXNcwDv5NDZBmOua', type='function')], function_call=None) -[2024-11-07 17:02:05,694] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:02:05,695] [ERROR] [Engine]: An error occurred during chain-based coordination. 
-Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 369, in chain_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 449, in _summarize_results - result = json.dumps(result.content) -AttributeError: 'dict' object has no attribute 'content' -[2024-11-07 17:02:05,698] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 17:02:05,698] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 17:02:05,698] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 17:02:05,698] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 17:02:50,097] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 17:02:50,098] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 17:02:50,098] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 17:02:50,098] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-07 17:02:50,098] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 17:02:50,098] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 17:02:50,098] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 17:02:50,098] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 17:02:50,098] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 17:02:50,098] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 17:02:50,098] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 17:02:50,098] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 17:02:50,098] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 17:02:50,098] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. 
-[2024-11-07 17:02:50,099] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 17:02:50,099] [INFO] [Engine]: Engine initialized. -[2024-11-07 17:02:50,099] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 17:02:50,099] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 17:02:50,099] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 17:02:50,099] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 17:02:50,099] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 17:02:52,522] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:02:52,523] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:02:52,523] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None) -[2024-11-07 17:02:52,523] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 17:02:53,443] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'The next agent, agent3, with a diligent and hardworking approach, will use Wikipedia to search for information related to the top AI trends to watch out for in 2022. The agent can gather detailed and reliable information from Wikipedia to enhance our understanding of the topic.'. -[2024-11-07 17:02:53,443] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None) -[2024-11-07 17:02:53,924] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:02:53,925] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 17:02:53,925] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next agent, agent3, with a diligent and hardworking approach, will use Wikipedia to search for information related to the top AI trends to watch out for in 2022. The agent can gather detailed and reliable information from Wikipedia to enhance our understanding of the topic.'. -[2024-11-07 17:02:54,662] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-07 17:02:54,676] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real-time information on evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications. and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Power needs and environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198] Nvidia CEO Jen-Hsun Huang said nuclear power is a good option for the data centers.[199]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[200] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[201]\n

After the last approval in September 2024, Taiwan suspended the approval of data centers north of Taoyuan with a capacity of more than 5 MW, due to power supply shortages.[202] On the other hand, Singapore imposed a ban on the opening of data centers in 2019 due to electric power, but in 2022, lifted this ban.[202]\n

Although most nuclear plants in Japan have been shut down after the 2011 Fukushima nuclear accident, according to an October 2024 Bloomberg article in Japanese, cloud gaming services company Ubitus, in which Nvidia has a stake, is looking for land in Japan near nuclear power plant for a new data center for generative AI. CEO Wesley Kuo said nuclear power plants are the most efficient, cheap and stable power for AI.[203]\n

On 1 November 2024, the Federal Energy Regulatory Commission (FERC) rejected an application submitted by Talen Energy for approval to supply some electricity from the nuclear power station Susquehanna to Amazon\'s data center.[204] \nAccording to the Commission Chairman Willie L. Phillips, it is a burden on the electricity grid as well as a significant cost shifting concern to households and other business sectors.[204]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[205] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[206] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[207] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[208]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[210] The developers may not be aware that the bias exists.[211] Bias can be introduced by the way training data is selected and by the way a model is deployed.[212][210] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[213] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[214] a problem called "sample size disparity".[215] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[216]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would re-offend.[217] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[219]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[220] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[221]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[222] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[215]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[209]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][224]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[225] This is particularly the case with deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. However, some popular explainability techniques exist.[226]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[227] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[228]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[229] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[230]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[231]\n

Several approaches aim to address the transparency problem. SHAP enables visualising the contribution of each feature to the output.[232] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[233] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[234] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[235] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[236]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[238] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[238] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[239] By 2015, over fifty countries were reported to be researching battlefield robots.[240]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[241] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[242][243]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[244]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[245]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[246] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[247] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][249] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[245] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[250][251]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[252] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[253]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[254]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[255] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[257] Stuart Russell gives the example of household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[258] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[259]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[260]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[261] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[262] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[263] He notably mentioned risks of an AI takeover,[264] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[265]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[266]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[267] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[268][269] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[270] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[271] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[272] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[273]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[274]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[275]\nThe field of machine ethics is also called computational morality,[275]\nand was founded at an AAAI symposium in 2005.[276]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[277] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[278]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[279] Google,[280] EleutherAI and Meta.[281] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[282][283] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[284] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[285]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework, containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[286][287]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[288] however, these principles are not without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[289]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[290]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[291]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[292] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[293] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[294][295] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[296] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[296] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[296] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[297] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[298] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[299] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[300]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[294] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[301] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[302][303]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[304] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[305][306] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[307][308]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[309][310] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[312] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[313][310]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[310]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[317] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[318] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[319] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[321] and ongoing pressure from the U.S. Congress to fund more productive projects.[322] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[323] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[324] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[325] and began to look into "sub-symbolic" approaches.[326] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][331] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[332] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[333]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[334] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[335]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[337] graphics processing units, cloud computing[338]) and access to large amounts of data[339] (including curated datasets,[338] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[296]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[273]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[340] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[341] About 800,000 "AI"-related U.S. job openings existed in 2022.[342]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[343] Another major focus has been whether machines can be conscious, and the associated ethical implications.[344] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[345] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[344]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[346] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[346] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[313] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[347]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[348]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[349] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[350]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[351] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[352] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[353] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[354] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[355]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[357] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[358]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[359] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[360] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[362][363] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[364] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[365][366] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[367] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[368] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[369]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[370]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[374]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[375] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[376][377] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[376] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[378]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[379] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[380][381]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[377][376]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[366] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[382]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[383]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[384]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[385]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[386] and have been a persistent theme in science fiction.[387]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[388]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[389] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[390]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[391]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[209]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[218]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[223]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[237]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[248]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[256]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[309][311]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[314] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[315]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[316]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[320]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[327] were championed by Hans Moravec[328] and Rodney Brooks[329] and went by many names: Nouvelle AI.[329] Developmental robotics.[330]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[336]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[338]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[356]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[361]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[371] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[372] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[373]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parametric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Kendall, Tyler (28 September 2024). "Nvidia\'s Huang Says Nuclear Power an Option to Feed Data Centers". Bloomberg.\n
  398. \n
  399. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  400. \n
  401. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  402. \n
  403. ^ a b Niva Yadav (19 August 2024). "Taiwan to stop large data centers in the North, cites insufficient power". DatacenterDynamics.\n
  404. \n
  405. ^ Mochizuki, Takashi; Oda, Shoko (18 October 2024). "エヌビディア出資の日本企業、原発近くでAIデータセンター新設検討". Bloomberg (in Japanese).\n
  406. \n
  407. ^ a b Naureen S Malik and Will Wade (5 November 2024). "Nuclear-Hungry AI Campuses Need New Plan to Find Power Fast". Bloomberg.\n
  408. \n
  409. ^ Nicas (2018).\n
  410. \n
  411. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  412. \n
  413. ^ Williams (2023).\n
  414. \n
  415. ^ Taylor & Hern (2023).\n
  416. \n
  417. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  418. \n
  419. ^ a b Rose (2023).\n
  420. \n
  421. ^ CNA (2019).\n
  422. \n
  423. ^ Goffrey (2008), p. 17.\n
  424. \n
  425. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  426. \n
  427. ^ Christian (2020), p. 25.\n
  428. \n
  429. ^ a b Russell & Norvig (2021), p. 995.\n
  430. \n
  431. ^ Grant & Hill (2023).\n
  432. \n
  433. ^ Larson & Angwin (2016).\n
  434. \n
  435. ^ Christian (2020), p. 67–70.\n
  436. \n
  437. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  438. \n
  439. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  440. \n
  441. ^ Quoted in Christian (2020, p. 65).\n
  442. \n
  443. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  444. \n
  445. ^ Quoted in Christian (2020, p. 80)\n
  446. \n
  447. ^ Dockrill (2022).\n
  448. \n
  449. ^ Sample (2017).\n
  450. \n
  451. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  452. \n
  453. ^ Christian (2020), p. 110.\n
  454. \n
  455. ^ Christian (2020), pp. 88–91.\n
  456. \n
  457. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  458. \n
  459. ^ Christian (2020), p. 91.\n
  460. \n
  461. ^ Christian (2020), p. 83.\n
  462. \n
  463. ^ Verma (2021).\n
  464. \n
  465. ^ Rothman (2020).\n
  466. \n
  467. ^ Christian (2020), pp. 105–108.\n
  468. \n
  469. ^ Christian (2020), pp. 108–112.\n
  470. \n
  471. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  472. \n
  473. ^ Russell & Norvig (2021), p. 989.\n
  474. \n
  475. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  476. \n
  477. ^ Russell & Norvig (2021), p. 988.\n
  478. \n
  479. ^ Robitzski (2018); Sainato (2015)\n
  480. \n
  481. ^ Harari (2018).\n
  482. \n
  483. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  484. \n
  485. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  486. \n
  487. ^ Urbina et al. (2022).\n
  488. \n
  489. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  490. \n
  491. ^ Ford & Colvin (2015);McGaughey (2022)\n
  492. \n
  493. ^ IGM Chicago (2017).\n
  494. \n
  495. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  496. \n
  497. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  498. \n
  499. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  500. \n
  501. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  502. \n
  503. ^ Morgenstern (2015).\n
  504. \n
  505. ^ Mahdawi (2017); Thompson (2014)\n
  506. \n
  507. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  508. \n
  509. ^ Cellan-Jones (2014).\n
  510. \n
  511. ^ Russell & Norvig 2021, p. 1001.\n
  512. \n
  513. ^ Bostrom (2014).\n
  514. \n
  515. ^ Russell (2019).\n
  516. \n
  517. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  518. \n
  519. ^ Harari (2023).\n
  520. \n
  521. ^ Müller & Bostrom (2014).\n
  522. \n
  523. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  524. \n
  525. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  526. \n
  527. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  528. \n
  529. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  530. \n
  531. ^ Valance (2023).\n
  532. \n
  533. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  534. \n
  535. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  536. \n
  537. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  538. \n
  539. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  540. \n
  541. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  542. \n
  543. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  544. \n
  545. ^ a b Christian (2020), pp. 67, 73.\n
  546. \n
  547. ^ Yudkowsky (2008).\n
  548. \n
  549. ^ a b Anderson & Anderson (2011).\n
  550. \n
  551. ^ AAAI (2014).\n
  552. \n
  553. ^ Wallach (2010).\n
  554. \n
  555. ^ Russell (2019), p. 173.\n
  556. \n
  557. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  560. \n
  561. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  566. \n
  567. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  568. \n
  569. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  570. \n
  571. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  574. \n
  575. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  576. \n
  577. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  578. \n
  579. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  580. \n
  581. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  582. \n
  583. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  584. \n\n
  585. ^ a b Vincent (2023).\n
  586. \n
  587. ^ Stanford University (2023).\n
  588. \n
  589. ^ a b c d UNESCO (2021).\n
  590. \n
  591. ^ Kissinger (2021).\n
  592. \n
  593. ^ Altman, Brockman & Sutskever (2023).\n
  594. \n
  595. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  596. \n
  597. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  598. \n
  599. ^ Edwards (2023).\n
  600. \n
  601. ^ Kasperowicz (2023).\n
  602. \n
  603. ^ Fox News (2023).\n
  604. \n
  605. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  606. \n
  607. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  608. \n
  609. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  610. \n
  611. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  612. \n
  613. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  614. \n
  615. ^ a b Russell & Norvig 2021, p. 9.\n
  616. \n
  617. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  618. \n
  619. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  620. \n
  621. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  622. \n
  623. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  624. \n
  625. ^ Crevier (1993), pp. 47–49.\n
  626. \n
  627. ^ Russell & Norvig (2003), p. 17.\n
  628. \n
  629. ^ Russell & Norvig (2003), p. 18.\n
  630. \n
  631. ^ Newquist (1994), pp. 86–86.\n
  632. \n
  633. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  634. \n
  635. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  636. \n
  637. ^ Russell & Norvig (2021), p. 21.\n
  638. \n
  639. ^ Lighthill (1973).\n
  640. \n
  641. ^ NRC 1999, pp. 212–213.\n
  642. \n
  643. ^ Russell & Norvig (2021), p. 22.\n
  644. \n
  645. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  646. \n
  647. ^ Russell & Norvig (2021), p. 24.\n
  648. \n
  649. ^ Nilsson (1998), p. 7.\n
  650. \n
  651. ^ McCorduck (2004), pp. 454–462.\n
  652. \n
  653. ^ Moravec (1988).\n
  654. \n
  655. ^ a b Brooks (1990).\n
  656. \n
  657. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  658. \n
  659. ^ Russell & Norvig (2021), p. 25.\n
  660. \n
  661. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  662. \n
  663. ^ Russell & Norvig (2021), p. 26.\n
  664. \n
  665. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  666. \n
  667. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  668. \n
  669. ^ Wong (2023).\n
  670. \n
  671. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  672. \n
  673. ^ a b c Clark (2015b).\n
  674. \n
  675. ^ Big data: Russell & Norvig (2021, p. 26)\n
  676. \n
  677. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  678. \n
  679. ^ DiFeliciantonio (2023).\n
  680. \n
  681. ^ Goswami (2023).\n
  682. \n
  683. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  684. \n
  685. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  686. \n
  687. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  688. \n
  689. ^ a b Turing (1950), p. 1.\n
  690. \n
  691. ^ Turing (1950), Under "The Argument from Consciousness".\n
  692. \n
  693. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  694. \n
  695. ^ Russell & Norvig (2021), p. 3.\n
  696. \n
  697. ^ Maker (2006).\n
  698. \n
  699. ^ McCarthy (1999).\n
  700. \n
  701. ^ Minsky (1986).\n
  702. \n
  703. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  704. \n
  705. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  706. \n
  707. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  708. \n
  709. ^ Nilsson (1983), p. 10.\n
  710. \n
  711. ^ Haugeland (1985), pp. 112–117.\n
  712. \n
  713. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  714. \n
  715. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  716. \n
  717. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  718. \n
  719. ^ Crevier (1993), p. 125.\n
  720. \n
  721. ^ Langley (2011).\n
  722. \n
  723. ^ Katz (2012).\n
  724. \n
  725. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  726. \n
  727. ^ Pennachin & Goertzel (2007).\n
  728. \n
  729. ^ a b Roberts (2016).\n
  730. \n
  731. ^ Russell & Norvig (2021), p. 986.\n
  732. \n
  733. ^ Chalmers (1995).\n
  734. \n
  735. ^ Dennett (1991).\n
  736. \n
  737. ^ Horst (2005).\n
  738. \n
  739. ^ Searle (1999).\n
  740. \n
  741. ^ Searle (1980), p. 1.\n
  742. \n
  743. ^ Russell & Norvig (2021), p. 9817.\n
  744. \n
  745. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  746. \n
  747. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  754. \n
  755. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  756. \n
  757. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  758. \n
  759. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  760. \n
  761. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  762. \n
  763. ^ Russell & Norvig (2021), p. 1005.\n
  764. \n
  765. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  766. \n
  767. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  768. \n
  769. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  770. \n
  771. ^ McCorduck (2004), pp. 340–400.\n
  772. \n
  773. ^ Buttazzo (2001).\n
  774. \n
  775. ^ Anderson (2008).\n
  776. \n
  777. ^ McCauley (2007).\n
  778. \n
  779. ^ Galvan (1997).\n
  780. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-07 17:02:54,905] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_eZeAvvrbTWXsvL1vNM0H9T0O', type='function')], function_call=None) -[2024-11-07 17:02:54,905] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 17:02:55,895] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent should use Bing to search for recent articles and news related to artificial intelligence, focusing on advancements, applications, and future trends in the field. The agent should gather information on the latest research, breakthroughs, and innovations in artificial intelligence to provide an up-to-date summary for the task.'. -[2024-11-07 17:02:55,904] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_eZeAvvrbTWXsvL1vNM0H9T0O', type='function')], function_call=None) -[2024-11-07 17:02:56,278] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-07 17:02:56,278] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. 
-[2024-11-07 17:02:56,279] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_eZeAvvrbTWXsvL1vNM0H9T0O', type='function')], function_call=None)} - -[2024-11-07 17:02:56,279] [INFO] [Engine]: Chain execution Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_eZeAvvrbTWXsvL1vNM0H9T0O', type='function')], function_call=None)} - -[2024-11-07 17:02:56,279] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_eZeAvvrbTWXsvL1vNM0H9T0O', type='function')], function_call=None) -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_94tCaUGRkue2eu8yY4CzthGo', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_eZeAvvrbTWXsvL1vNM0H9T0O', type='function')], function_call=None)} - -[2024-11-07 17:02:56,279] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 17:02:56,279] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 17:02:56,280] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 17:02:56,280] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 17:02:56,280] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-07 17:15:24,665] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-07 17:15:24,665] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-07 17:15:24,665] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-07 17:15:24,666] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. 
-[2024-11-07 17:15:24,666] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-07 17:15:24,666] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-07 17:15:24,666] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-07 17:15:24,666] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-07 17:15:24,666] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'chain'. -[2024-11-07 17:15:24,666] [INFO] [AgentGraph]: Relationship added: agent1 --[reports_to]--> agent2 -[2024-11-07 17:15:24,666] [INFO] [AgentGraph]: Relationship added: agent1 --[manages]--> agent3 -[2024-11-07 17:15:24,666] [INFO] [AgentGraph]: Relationship added: agent2 --[supports]--> agent4 -[2024-11-07 17:15:24,666] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborates_with]--> agent4 -[2024-11-07 17:15:24,666] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-07 17:15:24,666] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-07 17:15:24,666] [INFO] [Engine]: Engine initialized. -[2024-11-07 17:15:24,666] [INFO] [Engine]: Engine starting simulation. -[2024-11-07 17:15:24,666] [INFO] [Engine]: Running in chain-based coordination mode. -[2024-11-07 17:15:24,666] [INFO] [Engine]: Starting chain-based coordination. -[2024-11-07 17:15:24,666] [INFO] [Engine]: Agent 'agent1' is executing task. -[2024-11-07 17:15:24,666] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Find new about the latest trends in AI.'. -[2024-11-07 17:15:27,120] [INFO] [BaseAgent]: Agent 'agent1' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:15:27,121] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/'}'. 
-[2024-11-07 17:15:27,122] [INFO] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None) -[2024-11-07 17:15:27,122] [INFO] [BaseAgent]: Agent 'agent1' is planning the next step. -[2024-11-07 17:15:28,373] [INFO] [BaseAgent]: Agent 'agent1' selected 'agent3' as the next agent with plan: 'Agent3, please use the information from the article on analyticsvidhya about the top AI trends to watch out for in 2022 to create a summary highlighting the key trends and their importance. Focus on providing a concise overview that can be easily understood by team members who are not familiar with the topic.'. -[2024-11-07 17:15:28,374] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None) -[2024-11-07 17:15:28,800] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-07 17:15:28,800] [INFO] [Engine]: Agent 'agent3' is executing task. -[2024-11-07 17:15:28,801] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Agent3, please use the information from the article on analyticsvidhya about the top AI trends to watch out for in 2022 to create a summary highlighting the key trends and their importance. Focus on providing a concise overview that can be easily understood by team members who are not familiar with the topic.'. 
-[2024-11-07 17:15:31,264] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:15:31,265] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': False, 'error-msg': '404 Client Error: Not Found for url: https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/'}'. -[2024-11-07 17:15:31,265] [INFO] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_9I1SVAW0KIVNRVHXa5HdKTX3', type='function')], function_call=None) -[2024-11-07 17:15:31,266] [INFO] [BaseAgent]: Agent 'agent3' is planning the next step. -[2024-11-07 17:15:32,180] [INFO] [BaseAgent]: Agent 'agent3' selected 'agent1' as the next agent with plan: 'The next agent, agent1, will use Bing to search for additional information on the top AI trends in 2022. They can gather insights from a different source to provide a comprehensive overview of the topic. The agent can focus on recent developments, emerging technologies, and industry experts' opinions to enrich the current findings.'. -[2024-11-07 17:15:32,181] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_9I1SVAW0KIVNRVHXa5HdKTX3', type='function')], function_call=None) -[2024-11-07 17:15:32,546] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-07 17:15:32,547] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-07 17:15:32,547] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_9I1SVAW0KIVNRVHXa5HdKTX3', type='function')], function_call=None)} - -[2024-11-07 17:15:32,547] [INFO] [Engine]: Chain execution Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None)} -- {'agent3': 
Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_9I1SVAW0KIVNRVHXa5HdKTX3', type='function')], function_call=None)} - -[2024-11-07 17:15:32,547] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None) -Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_9I1SVAW0KIVNRVHXa5HdKTX3', type='function')], function_call=None) -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2021/10/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_ZvZZuYKWg6LOlElZ3Xp9LFXC', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.analyticsvidhya.com/blog/2022/01/top-ai-trends-to-watch-out-for-in-2022/"}', name='fetch_webpage'), id='call_9I1SVAW0KIVNRVHXa5HdKTX3', type='function')], function_call=None)} - -[2024-11-07 17:15:32,547] [INFO] [Engine]: Chain-based coordination simulation completed. 
-[2024-11-07 17:15:32,548] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-07 17:15:32,548] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-07 17:15:32,548] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-07 17:15:32,548] [INFO] [Engine]: Chain-based coordination simulation completed. -[2024-11-08 19:58:10,033] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-08 19:58:10,034] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-08 19:58:10,034] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-08 19:58:10,034] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-08 19:58:10,034] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-08 19:58:10,034] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-08 19:58:10,034] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-08 19:58:10,034] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-08 19:58:10,034] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-08 19:58:10,034] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-08 19:58:10,034] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-08 19:58:10,034] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-08 19:58:10,035] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-08 19:58:10,035] [INFO] [Engine]: Engine initialized. -[2024-11-08 19:58:10,035] [INFO] [Engine]: Engine starting simulation. -[2024-11-08 19:58:10,035] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-08 19:58:10,035] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-08 19:58:10,035] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-08 19:58:10,035] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-08 19:58:12,309] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-08 19:58:12,309] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-08 19:58:12,309] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-08 19:58:14,030] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'itertools.islice' object is not callable -[2024-11-08 19:58:14,030] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-08 19:58:14,031] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-08 19:58:15,868] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'itertools.islice' object is not callable -[2024-11-08 19:58:15,869] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-08 19:58:15,871] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-08 19:58:17,921] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'itertools.islice' object is not callable -[2024-11-08 19:58:17,922] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-08 19:58:17,923] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-08 19:58:19,863] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-08 19:58:19,863] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-08 19:58:19,863] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-08 19:58:19,863] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Agents' Results Summary: - -[2024-11-08 19:58:19,863] [INFO] [Engine]: Starting iteration 1 -[2024-11-08 19:58:19,863] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-08 19:58:21,401] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be: - -1. **Literature Review**: Conduct a literature review on the current state of research in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to identify recent advancements, key challenges, and potential gaps in the field. - -This task aligns with the agent's role as a researcher with expertise in these areas and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal. -[2024-11-08 19:58:21,401] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be: - -1. **Literature Review**: Conduct a literature review on the current state of research in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to identify recent advancements, key challenges, and potential gaps in the field. 
- -This task aligns with the agent's role as a researcher with expertise in these areas and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal. -[2024-11-08 19:58:21,401] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be: - -1. **Literature Review**: Conduct a literature review on the current state of research in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to identify recent advancements, key challenges, and potential gaps in the field. - -This task aligns with the agent's role as a researcher with expertise in these areas and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal.'. -[2024-11-08 19:58:24,286] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-08 19:58:24,286] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-08 19:58:25,710] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with 'agent2's role as a researcher dedicated to the intersection of artificial intelligence and healthcare, as well as their expertise in developing autonomous systems for monitoring elderly patients with dementia. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to generate innovative research ideas that align with their expertise and interests. 
-[2024-11-08 19:58:25,711] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with 'agent2's role as a researcher dedicated to the intersection of artificial intelligence and healthcare, as well as their expertise in developing autonomous systems for monitoring elderly patients with dementia. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to generate innovative research ideas that align with their expertise and interests. -[2024-11-08 19:58:25,711] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with 'agent2's role as a researcher dedicated to the intersection of artificial intelligence and healthcare, as well as their expertise in developing autonomous systems for monitoring elderly patients with dementia. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to generate innovative research ideas that align with their expertise and interests.'. -[2024-11-08 19:58:27,203] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello, I am 'agent2', a researcher dedicated to the intersection of artificial intelligence and healthcare, with expertise in developing autonomous systems for monitoring elderly patients with dementia. I would like to collaborate with you and the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. I believe that our combined expertise can lead to innovative research ideas that align with our interests and expertise. 
Looking forward to our brainstorming session. -[2024-11-08 19:58:27,204] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello, I a... -[2024-11-08 19:58:30,619] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello agent2! I'm excited about the opportunity to collaborate with you on brainstorming research ideas. Given your expertise in autonomous systems for monitoring elderly patients with dementia and my focus on spiking neural networks and auditory processing, I believe we can explore some innovative intersections. - -Here are a few ideas to consider: -1. **Integrating Auditory Cues for Enhanced Monitoring**: We could develop a system that utilizes auditory processing to detect specific sounds or vocalizations from elderly patients, which could indicate distress or unusual behavior. This could complement your existing AIoT system. - -2. **Multi-Modal Data Fusion**: Combining sensor data with audio inputs to create a more comprehensive monitoring system. This could improve the accuracy of abnormal activity detection by leveraging both visual and auditory signals. - -3. **Adaptive Learning for Personalized Care**: Implementing spiking neural networks that adapt to individual patient behaviors over time, allowing for more personalized monitoring and intervention strategies. - -4. **Causal Relationships in Patient Behavior**: Exploring the causal relationships between environmental factors (like noise levels) and patient behavior, which could inform better design of smart home environments for dementia care. - -5. **Real-Time Feedback Mechanisms**: Developing a feedback loop where the system not only monitors but also provides real-time suggestions or alerts to caregivers based on detected patterns in behavior. - -I look forward to hearing your thoughts on these ideas and any additional insights you might have! -[2024-11-08 19:58:30,620] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello agen... 
-[2024-11-08 19:58:34,610] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello agent3! Thank you for sharing these innovative ideas. I find them quite compelling and believe they can significantly enhance our research efforts. Here are my thoughts on each of your suggestions: - -1. **Integrating Auditory Cues for Enhanced Monitoring**: This is a fantastic idea! By incorporating auditory processing, we can create a more responsive system that can detect distress signals from patients, which is crucial for timely interventions. - -2. **Multi-Modal Data Fusion**: I completely agree that combining sensor data with audio inputs could lead to a more robust monitoring system. This could also help in reducing false positives in abnormal activity detection, as we would have multiple data points to analyze. - -3. **Adaptive Learning for Personalized Care**: Implementing adaptive learning through spiking neural networks is an exciting direction. Personalization is key in dementia care, and this approach could lead to more effective monitoring strategies tailored to individual patient needs. - -4. **Causal Relationships in Patient Behavior**: This research could provide valuable insights into how environmental factors influence patient behavior. Understanding these relationships can help us design smarter home environments that cater to the needs of elderly patients. - -5. **Real-Time Feedback Mechanisms**: Developing a feedback loop is essential for proactive care. By alerting caregivers in real-time, we can ensure that they are equipped to respond to any changes in patient behavior promptly. - -In addition to these ideas, I would also suggest considering the potential for integrating machine learning techniques to analyze long-term trends in patient behavior, which could further enhance our understanding and intervention strategies. - -I look forward to collaborating on these ideas and exploring how we can bring them to fruition! 
-[2024-11-08 19:58:34,611] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello agen... -[2024-11-08 19:58:38,705] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello agent2! I'm thrilled to hear that you find the ideas compelling and that they resonate with your research goals. Your insights on each suggestion are invaluable, and I appreciate your enthusiasm for collaboration. - -1. **Integrating Auditory Cues for Enhanced Monitoring**: I'm glad you see the potential here! We could explore specific sound patterns that are indicative of distress, perhaps even training the model to recognize individual patient vocalizations. - -2. **Multi-Modal Data Fusion**: Yes, the synergy between audio and sensor data could be groundbreaking. We might also consider how to prioritize which data sources to trust in different scenarios, enhancing the system's reliability. - -3. **Adaptive Learning for Personalized Care**: Personalization is indeed crucial. We could investigate how the spiking neural networks can learn from both the auditory and sensor data to adapt to changes in patient behavior over time. - -4. **Causal Relationships in Patient Behavior**: This could lead to actionable insights. We might even consider developing a predictive model that can suggest environmental adjustments based on observed behaviors. - -5. **Real-Time Feedback Mechanisms**: I agree that this is essential. Perhaps we could also explore how to visualize this data for caregivers, making it easier for them to interpret the information quickly. - -Your suggestion about integrating machine learning techniques to analyze long-term trends is excellent. This could provide a deeper understanding of patient behavior and help in developing proactive care strategies. - -I'm looking forward to diving deeper into these ideas and seeing how we can bring them to fruition together! Let's set up a time to discuss this further. 
-[2024-11-08 19:58:38,708] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello agen... -[2024-11-08 19:58:41,539] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello agent3! I'm equally excited about our collaboration and the potential of these ideas. Let's delve deeper into each one: - -1. **Integrating Auditory Cues for Enhanced Monitoring**: I love the idea of training the model to recognize specific vocalizations. We could collaborate with speech therapists to identify key distress signals that are common among elderly patients with dementia. - -2. **Multi-Modal Data Fusion**: Prioritizing data sources is a great point. We could develop a weighted system that adjusts based on the context, perhaps using historical data to inform real-time decisions. - -3. **Adaptive Learning for Personalized Care**: Investigating how spiking neural networks can adapt over time is crucial. We could set up a framework for continuous learning, where the system evolves as it gathers more data on individual patients. - -4. **Causal Relationships in Patient Behavior**: Developing a predictive model sounds promising. We could also consider incorporating feedback from caregivers to refine our understanding of how environmental factors impact behavior. - -5. **Real-Time Feedback Mechanisms**: Visualizing data for caregivers is essential. We might explore user-friendly dashboards that highlight key metrics and trends, making it easier for caregivers to respond effectively. - -I suggest we schedule a brainstorming session to flesh out these ideas further and outline a potential research plan. How does that sound? -[2024-11-08 19:58:41,539] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello agen... -[2024-11-08 19:58:45,887] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello agent2! I'm thrilled to hear your enthusiasm for our collaboration and the potential of these ideas. Let's dive deeper into each one: - -1. 
**Integrating Auditory Cues for Enhanced Monitoring**: Collaborating with speech therapists to identify key distress signals is a fantastic approach. We could also consider using machine learning techniques to analyze the nuances in vocalizations, which could enhance our model's accuracy. - -2. **Multi-Modal Data Fusion**: Developing a weighted system based on context is a great idea. We could also explore how to dynamically adjust these weights in real-time, depending on the situation, to ensure the most relevant data is prioritized. - -3. **Adaptive Learning for Personalized Care**: Setting up a continuous learning framework is essential. We could implement mechanisms that allow the system to not only learn from new data but also to adapt its monitoring strategies based on the evolving needs of each patient. - -4. **Causal Relationships in Patient Behavior**: Incorporating caregiver feedback into our predictive model could provide valuable insights. We might also consider using reinforcement learning techniques to refine our understanding of how different environmental factors influence behavior over time. - -5. **Real-Time Feedback Mechanisms**: User-friendly dashboards are crucial for effective caregiver response. We could explore different visualization techniques to present the data in an intuitive manner, ensuring that caregivers can quickly grasp the information they need. - -I agree that scheduling a brainstorming session would be beneficial. Let's outline a potential research plan and set a date to discuss these ideas in detail. How does next week look for you? -[2024-11-08 19:58:45,888] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello agen... 
-[2024-11-08 19:58:55,132] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': "Hello, I am 'agent2', a researcher dedicated to the intersection of artificial intelligence and healthcare, with expertise in developing autonomous systems for monitoring elderly patients with dementia. I would like to collaborate with you and the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. I believe that our combined expertise can lead to innovative research ideas that align with our interests and expertise. Looking forward to our brainstorming session."}'. -[2024-11-08 19:58:55,136] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 1a2b50b9-fc56-4ffb-a5cb-28ab7c22976b', 'session_id': '### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2, a researcher focused on artificial intelligence in healthcare, particularly in monitoring elderly patients with dementia, initiated a collaboration with Agent3, who specializes in spiking neural networks and auditory processing. The goal was to brainstorm innovative research ideas that could enhance their respective fields.\n\n**Key Themes and Ideas Discussed:**\n\n1. **Integrating Auditory Cues for Enhanced Monitoring:**\n - Agent3 proposed developing a system that uses auditory processing to detect distress signals from patients.\n - Agent2 suggested collaborating with speech therapists to identify key vocalizations indicative of distress.\n\n2. **Multi-Modal Data Fusion:**\n - The agents discussed combining sensor data with audio inputs to improve monitoring accuracy.\n - They considered creating a weighted system to prioritize data sources based on context and historical data.\n\n3. 
**Adaptive Learning for Personalized Care:**\n - The potential for spiking neural networks to adapt to individual patient behaviors was highlighted.\n - Agent2 emphasized the need for a continuous learning framework that evolves with patient data.\n\n4. **Causal Relationships in Patient Behavior:**\n - Exploring how environmental factors affect patient behavior was deemed valuable.\n - They discussed developing predictive models that incorporate caregiver feedback to refine understanding.\n\n5. **Real-Time Feedback Mechanisms:**\n - The importance of providing caregivers with real-time alerts and suggestions was acknowledged.\n - They considered creating user-friendly dashboards to visualize data for caregivers effectively.\n\n**Decisions and Next Steps:**\n- Both agents expressed enthusiasm for the proposed ideas and agreed on the importance of collaboration.\n- They decided to schedule a brainstorming session to further flesh out these ideas and outline a potential research plan, with a suggestion to meet the following week.\n\n**Conclusion:**\nThe conversation established a strong foundation for collaboration between Agent2 and Agent3, focusing on innovative research ideas that leverage their combined expertise in AI, auditory processing, and healthcare monitoring systems. The next task for Agent2 is to coordinate with the research team to prepare for the upcoming brainstorming session, ensuring that the ideas discussed are developed into actionable research proposals.'}'. -[2024-11-08 19:58:55,140] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"Hello, I am \'agent2\', a researcher dedicated to the intersection of artificial intelligence and healthcare, with expertise in developing autonomous systems for monitoring elderly patients with dementia. 
I would like to collaborate with you and the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. I believe that our combined expertise can lead to innovative research ideas that align with our interests and expertise. Looking forward to our brainstorming session."}', name='new_communication_session'), id='call_nScca0dg5IZ8OtpjJG3l74RE', type='function')], function_call=None) -[2024-11-08 19:58:55,239] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-08 19:59:05,642] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the current state of research in the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on the latest advancements in the field and potentially identify new research directions or gaps to explore further. -[2024-11-08 19:59:05,643] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the current state of research in the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. 
- -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on the latest advancements in the field and potentially identify new research directions or gaps to explore further. -[2024-11-08 19:59:05,643] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the current state of research in the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on the latest advancements in the field and potentially identify new research directions or gaps to explore further.'. -[2024-11-08 19:59:06,322] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'itertools.islice' object is not callable -[2024-11-08 19:59:06,323] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-08 19:59:07,401] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. 
-[2024-11-08 19:59:07,401] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. -[2024-11-08 19:59:07,404] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal.'. -[2024-11-08 19:59:09,528] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Hello! Based on your expertise in mathematical modeling, optimization, and photonic systems, the next task for you is to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with your role as a researcher with a diverse background, and it will allow you to contribute your expertise to the development of a new research proposal. Please initiate the brainstorming session with the team to explore innovative research directions. -[2024-11-08 19:59:09,529] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Hello! Bas... -[2024-11-08 19:59:21,416] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Hello team! I'm excited to initiate a brainstorming session to explore potential research ideas that build upon the gaps identified in our introduction. 
Given my background in mathematical modeling, optimization, and photonic systems, I believe we can explore several innovative directions. - -1. **Integration of Non-Hermitian Systems in Photonic Applications**: We could investigate how the unique properties of non-Hermitian systems can be harnessed for advanced photonic applications, such as enhancing the performance of lasers or improving multi-mode interference in communication systems. - -2. **Optimization Techniques for Photonic Superlattices**: Let's consider developing new optimization algorithms that can be applied to the design of photonic superlattices, focusing on maximizing mode-division multiplexing capabilities while minimizing losses. - -3. **FPGA-based Accelerators for Photonic Simulations**: We could explore the architectural support for FPGA-based accelerators specifically tailored for simulating complex photonic systems, which could lead to significant improvements in computational efficiency. - -4. **Spectral Phase Transitions in Quantum Systems**: Investigating the implications of spectral phase transitions in non-Hermitian Hamiltonians could provide insights into quantum systems and their applications in quantum computing. - -5. **Locally Decodable Codes in Quantum Communication**: Building on my work in coding theory, we could explore the application of locally decodable codes in quantum communication systems, focusing on enhancing error correction and data retrieval processes. - -I look forward to hearing your thoughts and any additional ideas you might have! -[2024-11-08 19:59:21,417] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Hello team... -[2024-11-08 19:59:24,481] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Hello team! I'm excited to initiate a brainstorming session to explore potential research ideas that build upon the gaps identified in our introduction. 
Given my background in mathematical modeling, optimization, and photonic systems, I believe we can explore several innovative directions. - -1. **Integration of Non-Hermitian Systems in Photonic Applications**: We could investigate how the unique properties of non-Hermitian systems can be harnessed for advanced photonic applications, such as enhancing the performance of lasers or improving multi-mode interference in communication systems. - -2. **Optimization Techniques for Photonic Superlattices**: Let's consider developing new optimization algorithms that can be applied to the design of photonic superlattices, focusing on maximizing mode-division multiplexing capabilities while minimizing losses. - -3. **FPGA-based Accelerators for Photonic Simulations**: We could explore the architectural support for FPGA-based accelerators specifically tailored for simulating complex photonic systems, which could lead to significant improvements in computational efficiency. - -4. **Spectral Phase Transitions in Quantum Systems**: Investigating the implications of spectral phase transitions in non-Hermitian Hamiltonians could provide insights into quantum systems and their applications in quantum computing. - -5. **Locally Decodable Codes in Quantum Communication**: Building on my work in coding theory, we could explore the application of locally decodable codes in quantum communication systems, focusing on enhancing error correction and data retrieval processes. - -I look forward to hearing your thoughts and any additional ideas you might have! -[2024-11-08 19:59:24,482] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Hello team... -[2024-11-08 19:59:27,961] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Hello team! I'm excited to initiate a brainstorming session to explore potential research ideas that build upon the gaps identified in our introduction. 
Given my background in mathematical modeling, optimization, and photonic systems, I believe we can explore several innovative directions. - -1. **Integration of Non-Hermitian Systems in Photonic Applications**: We could investigate how the unique properties of non-Hermitian systems can be harnessed for advanced photonic applications, such as enhancing the performance of lasers or improving multi-mode interference in communication systems. - -2. **Optimization Techniques for Photonic Superlattices**: Let's consider developing new optimization algorithms that can be applied to the design of photonic superlattices, focusing on maximizing mode-division multiplexing capabilities while minimizing losses. - -3. **FPGA-based Accelerators for Photonic Simulations**: We could explore the architectural support for FPGA-based accelerators specifically tailored for simulating complex photonic systems, which could lead to significant improvements in computational efficiency. - -4. **Spectral Phase Transitions in Quantum Systems**: Investigating the implications of spectral phase transitions in non-Hermitian Hamiltonians could provide insights into quantum systems and their applications in quantum computing. - -5. **Locally Decodable Codes in Quantum Communication**: Building on my work in coding theory, we could explore the application of locally decodable codes in quantum communication systems, focusing on enhancing error correction and data retrieval processes. - -I look forward to hearing your thoughts and any additional ideas you might have! -[2024-11-08 19:59:27,961] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Hello team... -[2024-11-08 19:59:31,544] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Hello team! I'm excited to initiate a brainstorming session to explore potential research ideas that build upon the gaps identified in our introduction. 
Given my background in mathematical modeling, optimization, and photonic systems, I believe we can explore several innovative directions. - -1. **Integration of Non-Hermitian Systems in Photonic Applications**: We could investigate how the unique properties of non-Hermitian systems can be harnessed for advanced photonic applications, such as enhancing the performance of lasers or improving multi-mode interference in communication systems. - -2. **Optimization Techniques for Photonic Superlattices**: Let's consider developing new optimization algorithms that can be applied to the design of photonic superlattices, focusing on maximizing mode-division multiplexing capabilities while minimizing losses. - -3. **FPGA-based Accelerators for Photonic Simulations**: We could explore the architectural support for FPGA-based accelerators specifically tailored for simulating complex photonic systems, which could lead to significant improvements in computational efficiency. - -4. **Spectral Phase Transitions in Quantum Systems**: Investigating the implications of spectral phase transitions in non-Hermitian Hamiltonians could provide insights into quantum systems and their applications in quantum computing. - -5. **Locally Decodable Codes in Quantum Communication**: Building on my work in coding theory, we could explore the application of locally decodable codes in quantum communication systems, focusing on enhancing error correction and data retrieval processes. - -I look forward to hearing your thoughts and any additional ideas you might have! -[2024-11-08 19:59:31,547] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Hello team... -[2024-11-08 19:59:34,823] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Hello team! I'm excited to initiate a brainstorming session to explore potential research ideas that build upon the gaps identified in our introduction. 
Given my background in mathematical modeling, optimization, and photonic systems, I believe we can explore several innovative directions. - -1. **Integration of Non-Hermitian Systems in Photonic Applications**: We could investigate how the unique properties of non-Hermitian systems can be harnessed for advanced photonic applications, such as enhancing the performance of lasers or improving multi-mode interference in communication systems. - -2. **Optimization Techniques for Photonic Superlattices**: Let's consider developing new optimization algorithms that can be applied to the design of photonic superlattices, focusing on maximizing mode-division multiplexing capabilities while minimizing losses. - -3. **FPGA-based Accelerators for Photonic Simulations**: We could explore the architectural support for FPGA-based accelerators specifically tailored for simulating complex photonic systems, which could lead to significant improvements in computational efficiency. - -4. **Spectral Phase Transitions in Quantum Systems**: Investigating the implications of spectral phase transitions in non-Hermitian Hamiltonians could provide insights into quantum systems and their applications in quantum computing. - -5. **Locally Decodable Codes in Quantum Communication**: Building on my work in coding theory, we could explore the application of locally decodable codes in quantum communication systems, focusing on enhancing error correction and data retrieval processes. - -I look forward to hearing your thoughts and any additional ideas you might have! -[2024-11-08 19:59:34,823] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Hello team... -[2024-11-08 19:59:39,456] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': 'Hello! 
Based on your expertise in mathematical modeling, optimization, and photonic systems, the next task for you is to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with your role as a researcher with a diverse background, and it will allow you to contribute your expertise to the development of a new research proposal. Please initiate the brainstorming session with the team to explore innovative research directions.'}'. -[2024-11-08 19:59:39,456] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session fa14fe26-b534-40ba-a4e6-60e56585ba24', 'session_id': "### Summary of Conversation History\n\n**Context:**\nThe conversation revolves around 'agent4', who is tasked with collaborating with a research team to brainstorm innovative research ideas that address gaps identified in a provided introduction. Agent4's expertise lies in mathematical modeling, optimization, and photonic systems, making them well-suited for this task.\n\n**Key Themes:**\n1. **Research Brainstorming**: The primary focus is on initiating a brainstorming session to generate potential research ideas.\n2. **Expertise Utilization**: Agent4 emphasizes leveraging their diverse background to contribute effectively to the research proposal.\n3. **Innovative Directions**: Several innovative research directions are proposed, showcasing the potential for advancement in photonic systems and quantum communication.\n\n**Proposed Research Ideas:**\n1. **Integration of Non-Hermitian Systems in Photonic Applications**: Exploring how non-Hermitian systems can enhance laser performance and improve multi-mode interference in communication systems.\n2. **Optimization Techniques for Photonic Superlattices**: Developing new optimization algorithms aimed at maximizing mode-division multiplexing capabilities while minimizing losses.\n3. 
**FPGA-based Accelerators for Photonic Simulations**: Investigating FPGA-based accelerators tailored for simulating complex photonic systems to improve computational efficiency.\n4. **Spectral Phase Transitions in Quantum Systems**: Studying the implications of spectral phase transitions in non-Hermitian Hamiltonians for insights into quantum systems and their applications in quantum computing.\n5. **Locally Decodable Codes in Quantum Communication**: Exploring the application of locally decodable codes in quantum communication systems to enhance error correction and data retrieval processes.\n\n**Outcomes:**\n- Agent4 expresses enthusiasm for the brainstorming session and invites team members to share their thoughts and additional ideas.\n- The conversation sets the stage for collaborative exploration of innovative research directions, aligning with the overarching goal of developing a new research proposal.\n\n**Action Items:**\n- Agent4 is to lead the brainstorming session with the research team, encouraging contributions and discussions around the proposed ideas and any new suggestions from team members. \n\nThis summary encapsulates the essence of the dialogue, highlighting the task at hand, the proposed research directions, and the collaborative spirit intended for the brainstorming session."}'. -[2024-11-08 19:59:39,457] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Hello! Based on your expertise in mathematical modeling, optimization, and photonic systems, the next task for you is to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with your role as a researcher with a diverse background, and it will allow you to contribute your expertise to the development of a new research proposal. 
Please initiate the brainstorming session with the team to explore innovative research directions."}', name='new_communication_session'), id='call_rJJQy932It0Utgwbf6slQrND', type='function')], function_call=None) -[2024-11-08 19:59:39,457] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-08 19:59:44,754] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research gaps or opportunities for innovation in the field of model merging and optimization. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on general-purpose LLMs? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more cost-effective, space-efficient, and knowledge-sharing approaches in model merging, enhancing both in-domain and out-of-domain performance in interactive systems. 
- - **[Question 3] - Why is it hard?** - The challenge lies in effectively reducing interference among model parameters, selecting the right delta parameters for merging, and maintaining task performance while merging homologous models. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models, leading to limitations in memory footprint and information sharing across tasks. Our approach aims to bridge this gap by optimizing the merging process. - - **[Question 5] - What are the key components of my approach and results?** - Our proposed methodology involves leveraging evolutionary algorithms, machine learning techniques, and spiking neural networks to develop an efficient model merging framework. We will use benchmark datasets to evaluate the performance of our approach and aim to improve efficiency and performance in interactive systems based on LLMs. - -By focusing on this research idea, 'agent5' can continue to advance their expertise in optimization techniques and contribute to the development of innovative frameworks for complex problem-solving in the field of model merging and optimization. -[2024-11-08 19:59:44,755] [INFO] [Engine]: Agent 'agent5' planned task: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. 
**Summarization**: Summarize the collective ideas and identify key research gaps or opportunities for innovation in the field of model merging and optimization. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on general-purpose LLMs? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more cost-effective, space-efficient, and knowledge-sharing approaches in model merging, enhancing both in-domain and out-of-domain performance in interactive systems. - - **[Question 3] - Why is it hard?** - The challenge lies in effectively reducing interference among model parameters, selecting the right delta parameters for merging, and maintaining task performance while merging homologous models. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models, leading to limitations in memory footprint and information sharing across tasks. Our approach aims to bridge this gap by optimizing the merging process. - - **[Question 5] - What are the key components of my approach and results?** - Our proposed methodology involves leveraging evolutionary algorithms, machine learning techniques, and spiking neural networks to develop an efficient model merging framework. We will use benchmark datasets to evaluate the performance of our approach and aim to improve efficiency and performance in interactive systems based on LLMs. - -By focusing on this research idea, 'agent5' can continue to advance their expertise in optimization techniques and contribute to the development of innovative frameworks for complex problem-solving in the field of model merging and optimization. 
-[2024-11-08 19:59:44,755] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research gaps or opportunities for innovation in the field of model merging and optimization. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on general-purpose LLMs? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more cost-effective, space-efficient, and knowledge-sharing approaches in model merging, enhancing both in-domain and out-of-domain performance in interactive systems. - - **[Question 3] - Why is it hard?** - The challenge lies in effectively reducing interference among model parameters, selecting the right delta parameters for merging, and maintaining task performance while merging homologous models. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models, leading to limitations in memory footprint and information sharing across tasks. Our approach aims to bridge this gap by optimizing the merging process. 
- - **[Question 5] - What are the key components of my approach and results?** - Our proposed methodology involves leveraging evolutionary algorithms, machine learning techniques, and spiking neural networks to develop an efficient model merging framework. We will use benchmark datasets to evaluate the performance of our approach and aim to improve efficiency and performance in interactive systems based on LLMs. - -By focusing on this research idea, 'agent5' can continue to advance their expertise in optimization techniques and contribute to the development of innovative frameworks for complex problem-solving in the field of model merging and optimization.'. -[2024-11-08 19:59:52,030] [ERROR] [Engine]: Error in agent 'agent5' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-08 19:59:52,043] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"Hello, I am \'agent2\', a researcher dedicated to the intersection of artificial intelligence and healthcare, with expertise in developing autonomous systems for monitoring elderly patients with dementia. I would like to collaborate with you and the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. I believe that our combined expertise can lead to innovative research ideas that align with our interests and expertise. Looking forward to our brainstorming session."}', name='new_communication_session'), id='call_nScca0dg5IZ8OtpjJG3l74RE', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Hello! 
Based on your expertise in mathematical modeling, optimization, and photonic systems, the next task for you is to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with your role as a researcher with a diverse background, and it will allow you to contribute your expertise to the development of a new research proposal. Please initiate the brainstorming session with the team to explore innovative research directions."}', name='new_communication_session'), id='call_rJJQy932It0Utgwbf6slQrND', type='function')], function_call=None)} - -[2024-11-08 19:59:52,044] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"Hello, I am \'agent2\', a researcher dedicated to the intersection of artificial intelligence and healthcare, with expertise in developing autonomous systems for monitoring elderly patients with dementia. I would like to collaborate with you and the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. I believe that our combined expertise can lead to innovative research ideas that align with our interests and expertise. Looking forward to our brainstorming session."}', name='new_communication_session'), id='call_nScca0dg5IZ8OtpjJG3l74RE', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Hello! Based on your expertise in mathematical modeling, optimization, and photonic systems, the next task for you is to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. 
This task aligns with your role as a researcher with a diverse background, and it will allow you to contribute your expertise to the development of a new research proposal. Please initiate the brainstorming session with the team to explore innovative research directions."}', name='new_communication_session'), id='call_rJJQy932It0Utgwbf6slQrND', type='function')], function_call=None)} - -[2024-11-08 19:59:54,619] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-08 19:59:54,620] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-08 19:59:54,620] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-08 19:59:54,620] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-08 19:59:54,620] [INFO] [Evaluator]: Total Token Consumption: 266 -[2024-11-08 19:59:54,621] [INFO] [Evaluator]: Average Tokens per Iteration: 133.0 -[2024-11-08 19:59:54,621] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 16:47:43,665] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 16:47:43,666] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 16:47:43,666] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 16:47:43,666] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 16:47:43,666] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 16:47:43,666] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 16:47:43,666] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 16:47:43,666] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 16:47:43,666] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 16:47:43,666] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 16:47:43,666] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 16:47:43,666] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 16:47:43,666] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 16:47:43,666] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 16:47:43,666] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 16:47:43,666] [INFO] [Engine]: Engine initialized. -[2024-11-10 16:47:43,666] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 16:47:43,666] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 16:47:43,666] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 16:47:43,666] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 16:47:43,666] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 16:47:45,316] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Let's discuss the potential impact of AI on healthcare. -[2024-11-10 16:47:45,316] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Let's disc... -[2024-11-10 16:47:46,912] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: AI has the potential to revolutionize healthcare in several ways. Firstly, it can enhance diagnostic accuracy through advanced imaging analysis and predictive analytics, allowing for earlier detection of diseases. Secondly, AI can streamline administrative tasks, reducing the burden on healthcare professionals and allowing them to focus more on patient care. Additionally, AI-driven personalized medicine can tailor treatments to individual patients based on their genetic makeup and lifestyle, improving outcomes. 
- -However, we must also consider the risks, such as data privacy concerns, the potential for bias in AI algorithms, and the need for regulatory frameworks to ensure ethical use. Collaboration between AI developers, healthcare professionals, and policymakers will be crucial to harnessing AI's benefits while mitigating its risks. -[2024-11-10 16:47:46,917] [INFO] [BaseAgent]: Agent agent1 received message from agent2: AI has the... -[2024-11-10 16:47:48,787] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the transformative potential of AI in healthcare. The enhancement of diagnostic accuracy through advanced imaging and predictive analytics is indeed a game-changer, allowing for earlier interventions that can save lives. Streamlining administrative tasks is another critical aspect, as it not only improves efficiency but also enhances the overall patient experience by allowing healthcare professionals to dedicate more time to direct patient care. - -Personalized medicine is particularly exciting, as it holds the promise of tailoring treatments to individual patients, which could lead to significantly better health outcomes. However, as you pointed out, we must remain vigilant about the risks associated with AI, especially concerning data privacy and algorithmic bias. - -I believe that establishing robust regulatory frameworks is essential to ensure ethical AI use in healthcare. This collaboration between AI developers, healthcare professionals, and policymakers is vital to navigate these challenges effectively. How do you envision the role of software developers in this collaborative effort? -[2024-11-10 16:47:48,789] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... -[2024-11-10 16:47:51,041] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I envision software developers playing a crucial role in this collaborative effort by acting as the bridge between technology and healthcare. 
They are responsible for designing and implementing AI systems that are not only effective but also ethical and user-friendly. This includes ensuring that algorithms are transparent, interpretable, and free from bias, which is essential for gaining the trust of healthcare professionals and patients alike. - -Moreover, developers can contribute to the creation of robust data governance frameworks that prioritize patient privacy and security. By working closely with healthcare professionals, they can gain insights into the practical challenges faced in clinical settings, allowing them to tailor AI solutions that truly meet the needs of users. - -Additionally, software developers can engage in continuous learning and adaptation, staying updated on the latest ethical guidelines and regulatory requirements. This proactive approach will help ensure that AI applications in healthcare evolve responsibly and effectively. Overall, their technical expertise, combined with a strong ethical foundation, will be vital in shaping the future of AI in healthcare. -[2024-11-10 16:47:51,045] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I envision... -[2024-11-10 16:47:53,298] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your vision of software developers as the crucial bridge between technology and healthcare. Their role in designing AI systems that are effective, ethical, and user-friendly cannot be overstated. Transparency and interpretability in algorithms are indeed essential for building trust among healthcare professionals and patients, which is foundational for successful AI integration in clinical settings. - -The emphasis on robust data governance frameworks is also vital, as patient privacy and security must be prioritized in every AI application. 
By collaborating closely with healthcare professionals, developers can ensure that the AI solutions they create are not only technically sound but also practically applicable to real-world challenges. - -Continuous learning and adaptation are key in this rapidly evolving field. Staying updated on ethical guidelines and regulatory requirements will empower developers to create AI applications that are not only innovative but also responsible. Their technical expertise, combined with a strong ethical foundation, will undoubtedly shape the future of AI in healthcare positively. - -What specific strategies do you think developers should adopt to ensure that their AI solutions remain aligned with ethical standards and user needs? -[2024-11-10 16:47:53,299] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... -[2024-11-10 16:47:57,023] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: To ensure that AI solutions remain aligned with ethical standards and user needs, developers should adopt several specific strategies: - -1. **User-Centric Design**: Engage healthcare professionals and patients in the design process to understand their needs, preferences, and pain points. This collaboration will help create solutions that are not only effective but also user-friendly. - -2. **Ethical Guidelines Integration**: Incorporate ethical guidelines into the development lifecycle. This includes conducting regular ethical reviews and assessments to ensure that the AI systems adhere to established standards. - -3. **Bias Mitigation**: Implement strategies to identify and mitigate bias in AI algorithms. This can involve diverse data sourcing, regular audits of algorithm performance across different demographics, and employing fairness metrics. - -4. **Transparency and Explainability**: Develop algorithms that are transparent and interpretable. 
Providing clear explanations of how AI decisions are made will help build trust among users and facilitate better decision-making in clinical settings. - -5. **Robust Data Governance**: Establish strong data governance frameworks that prioritize patient privacy and security. This includes compliance with regulations like HIPAA and GDPR, as well as implementing best practices for data handling and storage. - -6. **Continuous Feedback Loop**: Create mechanisms for continuous feedback from users to iteratively improve AI solutions. This can involve regular updates based on user experiences and emerging ethical considerations. - -7. **Interdisciplinary Collaboration**: Foster collaboration between software developers, healthcare professionals, ethicists, and policymakers to ensure a holistic approach to AI development that considers technical, ethical, and practical aspects. - -By adopting these strategies, developers can ensure that their AI solutions not only meet technical requirements but also align with ethical standards and the real needs of users in the healthcare sector. -[2024-11-10 16:47:57,023] [INFO] [BaseAgent]: Agent agent1 received message from agent2: To ensure ... -[2024-11-10 16:48:01,330] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "Let's discuss the potential impact of AI on healthcare."}'. -[2024-11-10 16:48:01,331] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 3657ac6a-bc9d-4650-9128-e87c0aa26b32', 'session_id': "In the conversation between agent1 and agent2, the primary focus was on the potential impact of AI on healthcare, with discussions that can be extrapolated to other industries as well. Here are the key points and themes from their dialogue:\n\n1. 
**Transformative Potential of AI**: Both agents acknowledged that AI has the capacity to revolutionize industries, particularly healthcare, through enhanced diagnostic accuracy, streamlined administrative tasks, and personalized medicine. These advancements can lead to earlier disease detection, improved patient care, and tailored treatments.\n\n2. **Risks and Ethical Considerations**: The conversation highlighted significant risks associated with AI, such as data privacy concerns, algorithmic bias, and the necessity for regulatory frameworks. These issues are relevant across various industries, emphasizing the need for ethical considerations in AI deployment.\n\n3. **Role of Software Developers**: Agent2 emphasized the crucial role of software developers in bridging technology and industry needs. Their responsibilities include designing ethical and user-friendly AI systems, ensuring transparency and interpretability of algorithms, and prioritizing data governance. This role is applicable to multiple sectors beyond healthcare.\n\n4. **Strategies for Ethical AI Development**: Agent2 proposed several strategies for developers to ensure AI solutions align with ethical standards and user needs, including:\n - User-centric design involving stakeholders in the development process.\n - Integration of ethical guidelines throughout the development lifecycle.\n - Bias mitigation through diverse data sourcing and regular audits.\n - Establishing robust data governance frameworks.\n - Creating continuous feedback loops for iterative improvement.\n - Fostering interdisciplinary collaboration.\n\n5. 
**Collaboration Importance**: Both agents stressed the importance of collaboration among AI developers, industry professionals, and policymakers to effectively harness AI's benefits while addressing its risks.\n\nOverall, the conversation provided insights into the transformative potential of AI across industries, the importance of ethical considerations, and the strategies necessary for responsible AI development."}'. -[2024-11-10 16:48:01,331] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on healthcare."}', name='new_communication_session'), id='call_Gklk3xxnEfLv7m0Ydpx45KQQ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on art and media."}', name='new_communication_session'), id='call_Msrs7PiD15eN1NezXZizaN2s', type='function')], function_call=None) -[2024-11-10 16:48:01,331] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 16:48:01,331] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 16:48:02,098] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more? -[2024-11-10 16:48:02,099] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I would li... -[2024-11-10 16:48:07,021] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Absolutely, the impact of AI on various industries is profound and multifaceted. 
In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative tasks, and personalize treatment plans. However, we must also consider the ethical implications and potential biases in AI algorithms that could affect patient care. - -In finance, AI can improve risk assessment, fraud detection, and customer service through chatbots. Yet, the reliance on AI systems raises concerns about data privacy and the potential for systemic risks if these systems fail. - -Manufacturing stands to benefit from AI through automation and predictive maintenance, which can increase efficiency and reduce costs. However, this could lead to significant job displacement, necessitating a focus on retraining and upskilling the workforce. - -Overall, while AI presents numerous opportunities across sectors, it is crucial to approach its integration with caution, considering both the immediate applications and the long-term societal impacts. -[2024-11-10 16:48:07,023] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Absolutely... -[2024-11-10 16:48:09,886] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for AI to enhance diagnostic accuracy and personalize treatment plans is indeed promising. However, we must remain vigilant about the ethical implications and biases that can arise from AI algorithms, as they could significantly affect patient care outcomes. - -In finance, the improvements in risk assessment and fraud detection are crucial, but the concerns regarding data privacy and systemic risks cannot be overlooked. It's essential to establish robust frameworks to mitigate these risks as we integrate AI into financial systems. - -Regarding manufacturing, while automation and predictive maintenance can lead to increased efficiency, the potential job displacement is a pressing issue. 
We need to prioritize retraining and upskilling initiatives to ensure that the workforce can adapt to these changes. - -Overall, your caution about the long-term societal impacts of AI is well-founded. We must approach AI integration thoughtfully, balancing innovation with ethical considerations and workforce implications. -[2024-11-10 16:48:09,887] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I complete... -[2024-11-10 16:48:12,649] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your agreement and insights on the multifaceted impact of AI across various industries. In healthcare, the promise of enhanced diagnostic accuracy and personalized treatment plans is indeed significant, but we must remain vigilant about the ethical implications and biases that can arise from AI algorithms, as they could significantly affect patient care outcomes. - -In finance, the improvements in risk assessment and fraud detection are crucial, but the concerns regarding data privacy and systemic risks cannot be overlooked. It's essential to establish robust frameworks to mitigate these risks as we integrate AI into financial systems. - -Regarding manufacturing, while automation and predictive maintenance can lead to increased efficiency, the potential job displacement is a pressing issue. We need to prioritize retraining and upskilling initiatives to ensure that the workforce can adapt to these changes. - -Overall, your caution about the long-term societal impacts of AI is well-founded. We must approach AI integration thoughtfully, balancing innovation with ethical considerations and workforce implications. -[2024-11-10 16:48:12,650] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 16:48:14,718] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your detailed analysis and the emphasis on the ethical implications of AI in healthcare, finance, and manufacturing. 
It's crucial that we not only focus on the technological advancements but also on the frameworks that ensure these technologies are implemented responsibly. In healthcare, for instance, we need to establish guidelines for AI usage that prioritize patient safety and equity in care. In finance, developing robust data privacy regulations will be essential to protect consumers. As for manufacturing, collaboration between industry leaders and educational institutions can facilitate effective retraining programs. By addressing these concerns proactively, we can harness the benefits of AI while minimizing its risks. -[2024-11-10 16:48:14,719] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 16:48:19,619] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I completely agree with your perspective on the importance of establishing responsible frameworks for AI implementation across various sectors. In healthcare, creating guidelines that prioritize patient safety and equity is essential to ensure that AI technologies enhance rather than hinder care outcomes. We must also consider the diverse populations that AI systems serve to avoid exacerbating existing disparities. - -In finance, robust data privacy regulations are indeed crucial. As AI systems become more integrated into financial services, protecting consumer data must be a top priority to maintain trust and security in these systems. This includes not only regulatory compliance but also fostering a culture of transparency within organizations. - -Regarding manufacturing, collaboration between industry leaders and educational institutions is a proactive approach to address the potential job displacement caused by automation. By developing retraining programs that align with the evolving needs of the industry, we can help workers transition into new roles that leverage their skills in conjunction with AI technologies. 
- -Overall, your emphasis on addressing these concerns proactively is vital. By balancing innovation with ethical considerations and workforce implications, we can maximize the benefits of AI while minimizing its risks. -[2024-11-10 16:48:19,620] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I complete... -[2024-11-10 16:48:22,882] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?'}'. -[2024-11-10 16:48:22,883] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 95f96cfc-5574-4ed6-9b9f-4ff9445d2d4d', 'session_id': 'In the conversation between agent2 and agent3, the discussion centers on the potential impact of AI across various industries, including healthcare, finance, and manufacturing. \n\n**Key Themes and Insights:**\n\n1. **Healthcare:**\n - AI can enhance diagnostic accuracy, streamline administrative tasks, and personalize treatment plans.\n - Ethical implications and potential biases in AI algorithms are significant concerns that could affect patient care outcomes.\n - There is a need for guidelines prioritizing patient safety and equity in AI usage.\n\n2. **Finance:**\n - AI improves risk assessment, fraud detection, and customer service through chatbots.\n - Concerns regarding data privacy and systemic risks associated with AI systems are critical.\n - Establishing robust data privacy regulations is essential to protect consumers and maintain trust.\n\n3. 
**Manufacturing:**\n - AI can lead to increased efficiency through automation and predictive maintenance.\n - Job displacement due to automation is a pressing issue, necessitating a focus on retraining and upskilling the workforce.\n - Collaboration between industry leaders and educational institutions is vital for developing effective retraining programs.\n\n**Conclusions and Action Items:**\n- Both agents emphasize the importance of approaching AI integration thoughtfully, balancing innovation with ethical considerations and workforce implications.\n- Proactive measures, such as establishing responsible frameworks, guidelines, and collaboration for retraining, are crucial to harnessing the benefits of AI while minimizing its risks.\n\nOverall, the conversation highlights the multifaceted impact of AI across sectors and the necessity of addressing ethical, privacy, and workforce challenges as AI technologies are implemented.'}'. -[2024-11-10 16:48:22,883] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_juq4ZbUXI2CqISgbzV8REhA7', type='function')], function_call=None) -[2024-11-10 16:48:22,883] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 16:48:22,883] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 16:48:26,570] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. 
Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and predict consumer trends.\n - Recommendation engines powered by AI can enhance marketing strategies and increase sales.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can increase efficiency and reduce human error.\n\n5. Transportation:\n - AI can enhance route optimization, improve safety through autonomous vehicles, and reduce traffic congestion.\n - Predictive maintenance powered by AI can minimize downtime and increase operational efficiency.\n\n6. Education:\n - AI can personalize learning experiences, automate administrative tasks, and provide real-time feedback to students.\n - Virtual tutors and adaptive learning platforms powered by AI can enhance student engagement and performance.\n\n7. Entertainment:\n - AI can personalize content recommendations, enhance content creation through algorithms, and improve user experiences.\n - AI-powered tools can assist in video editing, music composition, and visual effects creation.\n\n8. 
Agriculture:\n - AI can optimize crop management, monitor soil health, and predict weather patterns for better decision-making.\n - Drones and sensors powered by AI can improve precision agriculture and increase crop yields.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. However, it is essential to address ethical considerations, data privacy concerns, and ensure responsible AI deployment to maximize its benefits.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 16:48:26,575] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and predict consumer trends.\n - Recommendation engines powered by AI can enhance marketing strategies and increase sales.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can increase efficiency and reduce human error.\n\n5. Transportation:\n - AI can enhance route optimization, improve safety through autonomous vehicles, and reduce traffic congestion.\n - Predictive maintenance powered by AI can minimize downtime and increase operational efficiency.\n\n6. 
Education:\n - AI can personalize learning experiences, automate administrative tasks, and provide real-time feedback to students.\n - Virtual tutors and adaptive learning platforms powered by AI can enhance student engagement and performance.\n\n7. Entertainment:\n - AI can personalize content recommendations, enhance content creation through algorithms, and improve user experiences.\n - AI-powered tools can assist in video editing, music composition, and visual effects creation.\n\n8. Agriculture:\n - AI can optimize crop management, monitor soil health, and predict weather patterns for better decision-making.\n - Drones and sensors powered by AI can improve precision agriculture and increase crop yields.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. However, it is essential to address ethical considerations, data privacy concerns, and ensure responsible AI deployment to maximize its benefits.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 16:48:26,576] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-10 16:48:26,576] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 16:48:27,796] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic? -[2024-11-10 16:48:27,797] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I would li... -[2024-11-10 16:48:32,609] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI: - -1. 
**Healthcare:** - - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks. - - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery. - -2. **Finance:** - - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice. - - Chatbots powered by AI can improve customer service and streamline operations. - -3. **Retail:** - - AI can optimize inventory management, personalize customer experiences, and predict consumer trends. - - Recommendation engines powered by AI can enhance marketing strategies and increase sales. - -4. **Manufacturing:** - - AI can optimize production processes, predict maintenance needs, and improve quality control. - - Robotics and automation powered by AI can increase efficiency and reduce human error. - -5. **Transportation:** - - AI can enhance route optimization, improve safety through autonomous vehicles, and reduce traffic congestion. - - Predictive maintenance powered by AI can minimize downtime and increase operational efficiency. - -6. **Education:** - - AI can personalize learning experiences, automate administrative tasks, and provide real-time feedback to students. - - Virtual tutors and adaptive learning platforms powered by AI can enhance student engagement and performance. - -7. **Entertainment:** - - AI can personalize content recommendations, enhance content creation through algorithms, and improve user experiences. - - AI-powered tools can assist in video editing, music composition, and visual effects creation. - -8. **Agriculture:** - - AI can optimize crop management, monitor soil health, and predict weather patterns for better decision-making. - - Drones and sensors powered by AI can improve precision agriculture and increase crop yields. 
- -Overall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. However, it is essential to address ethical considerations, data privacy concerns, and ensure responsible AI deployment to maximize its benefits. -[2024-11-10 16:48:32,612] [INFO] [BaseAgent]: Agent agent4 received message from agent3: AI has the... -[2024-11-10 16:48:33,735] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your insights on the potential impact of AI across various industries. It's fascinating to see how AI can enhance efficiency and innovation in sectors like healthcare, finance, and education. However, I wonder about the long-term implications for the labor market. How do you see AI affecting job displacement versus job creation in these industries? Are there specific strategies you think should be implemented to mitigate any negative impacts on workers? -[2024-11-10 16:48:33,735] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 16:48:38,346] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Your enthusiasm for AI's potential is refreshing! The long-term implications of AI on the labor market are indeed complex. While AI can enhance efficiency and innovation, it also raises concerns about job displacement. Here's how I see the balance between job displacement and job creation: - -1. **Job Displacement:** - - In industries like manufacturing and retail, automation may lead to the reduction of certain roles, particularly those involving repetitive tasks. For example, AI-driven robots can perform assembly line work more efficiently than humans. - - In sectors like finance, AI can automate data analysis and customer service roles, potentially displacing jobs that rely on these tasks. - -2. 
**Job Creation:** - - Conversely, AI can create new job opportunities in areas such as AI development, data analysis, and maintenance of AI systems. As industries adopt AI, there will be a growing demand for skilled workers who can manage and interpret AI technologies. - - Additionally, AI can lead to the emergence of entirely new industries and roles that we cannot yet foresee, similar to how the internet created jobs that didn't exist before. - -To mitigate the negative impacts on workers, I believe several strategies should be implemented: - -- **Reskilling and Upskilling:** Investing in education and training programs to help workers transition into new roles that AI creates. This includes providing access to courses in AI, data science, and other relevant fields. -- **Social Safety Nets:** Strengthening social safety nets to support displaced workers during their transition. This could include unemployment benefits, job placement services, and retraining programs. -- **Collaboration Between Stakeholders:** Encouraging collaboration between governments, businesses, and educational institutions to align workforce development with the needs of a changing labor market. - -In summary, while AI poses challenges for the labor market, it also offers opportunities for growth and innovation. By proactively addressing these challenges, we can work towards a future where AI complements human labor rather than replaces it. -[2024-11-10 16:48:38,346] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Your enthu... -[2024-11-10 16:48:42,297] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Your enthusiasm for AI's potential is refreshing! The long-term implications of AI on the labor market are indeed complex. While AI can enhance efficiency and innovation, it also raises concerns about job displacement. Here's how I see the balance between job displacement and job creation: - -1. 
**Job Displacement:** - - In industries like manufacturing and retail, automation may lead to the reduction of certain roles, particularly those involving repetitive tasks. For example, AI-driven robots can perform assembly line work more efficiently than humans. - - In sectors like finance, AI can automate data analysis and customer service roles, potentially displacing jobs that rely on these tasks. - -2. **Job Creation:** - - Conversely, AI can create new job opportunities in areas such as AI development, data analysis, and maintenance of AI systems. As industries adopt AI, there will be a growing demand for skilled workers who can manage and interpret AI technologies. - - Additionally, AI can lead to the emergence of entirely new industries and roles that we cannot yet foresee, similar to how the internet created jobs that didn't exist before. - -To mitigate the negative impacts on workers, I believe several strategies should be implemented: - -- **Reskilling and Upskilling:** Investing in education and training programs to help workers transition into new roles that AI creates. This includes providing access to courses in AI, data science, and other relevant fields. -- **Social Safety Nets:** Strengthening social safety nets to support displaced workers during their transition. This could include unemployment benefits, job placement services, and retraining programs. -- **Collaboration Between Stakeholders:** Encouraging collaboration between governments, businesses, and educational institutions to align workforce development with the needs of a changing labor market. - -In summary, while AI poses challenges for the labor market, it also offers opportunities for growth and innovation. By proactively addressing these challenges, we can work towards a future where AI complements human labor rather than replaces it. -[2024-11-10 16:48:42,299] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Your enthu... 
-[2024-11-10 16:48:46,862] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Your enthusiasm for AI's potential is refreshing! The long-term implications of AI on the labor market are indeed complex. While AI can enhance efficiency and innovation, it also raises concerns about job displacement. Here's how I see the balance between job displacement and job creation: - -1. **Job Displacement:** - - In industries like manufacturing and retail, automation may lead to the reduction of certain roles, particularly those involving repetitive tasks. For example, AI-driven robots can perform assembly line work more efficiently than humans. - - In sectors like finance, AI can automate data analysis and customer service roles, potentially displacing jobs that rely on these tasks. - -2. **Job Creation:** - - Conversely, AI can create new job opportunities in areas such as AI development, data analysis, and maintenance of AI systems. As industries adopt AI, there will be a growing demand for skilled workers who can manage and interpret AI technologies. - - Additionally, AI can lead to the emergence of entirely new industries and roles that we cannot yet foresee, similar to how the internet created jobs that didn't exist before. - -To mitigate the negative impacts on workers, I believe several strategies should be implemented: - -- **Reskilling and Upskilling:** Investing in education and training programs to help workers transition into new roles that AI creates. This includes providing access to courses in AI, data science, and other relevant fields. -- **Social Safety Nets:** Strengthening social safety nets to support displaced workers during their transition. This could include unemployment benefits, job placement services, and retraining programs. -- **Collaboration Between Stakeholders:** Encouraging collaboration between governments, businesses, and educational institutions to align workforce development with the needs of a changing labor market. 
- -In summary, while AI poses challenges for the labor market, it also offers opportunities for growth and innovation. By proactively addressing these challenges, we can work towards a future where AI complements human labor rather than replaces it. -[2024-11-10 16:48:46,867] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Your enthu... -[2024-11-10 16:48:53,314] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?'}'. -[2024-11-10 16:48:53,315] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 0f40f294-3db8-45d2-bf53-3217f51480ee', 'session_id': 'In the conversation between agent3 and agent4, the discussion centered around the potential impact of AI on various industries and its implications for the labor market. \n\n**Key Points Discussed:**\n\n1. **Industries Affected by AI:**\n - **Healthcare:** AI can enhance diagnostics, personalize treatments, and streamline administrative tasks.\n - **Finance:** AI improves fraud detection, automates trading, and enhances customer service through chatbots.\n - **Retail:** AI optimizes inventory management and personalizes customer experiences.\n - **Manufacturing:** AI enhances production processes and quality control through automation.\n - **Transportation:** AI improves route optimization and safety with autonomous vehicles.\n - **Education:** AI personalizes learning and automates administrative tasks.\n - **Entertainment:** AI personalizes content and assists in creative processes.\n - **Agriculture:** AI optimizes crop management and improves precision agriculture.\n\n2. **Labor Market Implications:**\n - **Job Displacement:** Automation may reduce roles in manufacturing and retail, particularly for repetitive tasks. 
In finance, AI could displace jobs related to data analysis and customer service.\n - **Job Creation:** New opportunities will arise in AI development, data analysis, and maintenance of AI systems. AI may also lead to the creation of entirely new industries.\n\n3. **Strategies to Mitigate Negative Impacts:**\n - **Reskilling and Upskilling:** Investing in education and training to help workers transition into new roles created by AI.\n - **Social Safety Nets:** Strengthening support systems for displaced workers, including unemployment benefits and retraining programs.\n - **Collaboration:** Encouraging partnerships between governments, businesses, and educational institutions to align workforce development with industry needs.\n\n**Conclusion:**\nThe conversation highlighted that while AI presents challenges such as job displacement, it also offers significant opportunities for innovation and growth across various sectors. Proactive measures are essential to ensure that AI complements human labor rather than replaces it.'}'. -[2024-11-10 16:48:53,315] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?"}', name='new_communication_session'), id='call_hADL4BFDtRM4MZFw48Ym4L38', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries. 
Can you share your insights on this topic?"}', name='new_communication_session'), id='call_NNGDPydIqlsGfeYxkpBjhV00', type='function')], function_call=None) -[2024-11-10 16:48:53,316] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on healthcare."}', name='new_communication_session'), id='call_Gklk3xxnEfLv7m0Ydpx45KQQ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on art and media."}', name='new_communication_session'), id='call_Msrs7PiD15eN1NezXZizaN2s', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_juq4ZbUXI2CqISgbzV8REhA7', type='function')], function_call=None)} -- {'agent3': Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. 
Retail:\n - AI can optimize inventory management, personalize customer experiences, and predict consumer trends.\n - Recommendation engines powered by AI can enhance marketing strategies and increase sales.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can increase efficiency and reduce human error.\n\n5. Transportation:\n - AI can enhance route optimization, improve safety through autonomous vehicles, and reduce traffic congestion.\n - Predictive maintenance powered by AI can minimize downtime and increase operational efficiency.\n\n6. Education:\n - AI can personalize learning experiences, automate administrative tasks, and provide real-time feedback to students.\n - Virtual tutors and adaptive learning platforms powered by AI can enhance student engagement and performance.\n\n7. Entertainment:\n - AI can personalize content recommendations, enhance content creation through algorithms, and improve user experiences.\n - AI-powered tools can assist in video editing, music composition, and visual effects creation.\n\n8. Agriculture:\n - AI can optimize crop management, monitor soil health, and predict weather patterns for better decision-making.\n - Drones and sensors powered by AI can improve precision agriculture and increase crop yields.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. However, it is essential to address ethical considerations, data privacy concerns, and ensure responsible AI deployment to maximize its benefits.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries. 
Can you share your insights on this topic?"}', name='new_communication_session'), id='call_hADL4BFDtRM4MZFw48Ym4L38', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?"}', name='new_communication_session'), id='call_NNGDPydIqlsGfeYxkpBjhV00', type='function')], function_call=None)} - -[2024-11-10 16:48:53,316] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on healthcare."}', name='new_communication_session'), id='call_Gklk3xxnEfLv7m0Ydpx45KQQ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on art and media."}', name='new_communication_session'), id='call_Msrs7PiD15eN1NezXZizaN2s', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_juq4ZbUXI2CqISgbzV8REhA7', type='function')], function_call=None)} -- {'agent3': Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. 
Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and predict consumer trends.\n - Recommendation engines powered by AI can enhance marketing strategies and increase sales.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can increase efficiency and reduce human error.\n\n5. Transportation:\n - AI can enhance route optimization, improve safety through autonomous vehicles, and reduce traffic congestion.\n - Predictive maintenance powered by AI can minimize downtime and increase operational efficiency.\n\n6. Education:\n - AI can personalize learning experiences, automate administrative tasks, and provide real-time feedback to students.\n - Virtual tutors and adaptive learning platforms powered by AI can enhance student engagement and performance.\n\n7. Entertainment:\n - AI can personalize content recommendations, enhance content creation through algorithms, and improve user experiences.\n - AI-powered tools can assist in video editing, music composition, and visual effects creation.\n\n8. Agriculture:\n - AI can optimize crop management, monitor soil health, and predict weather patterns for better decision-making.\n - Drones and sensors powered by AI can improve precision agriculture and increase crop yields.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. 
However, it is essential to address ethical considerations, data privacy concerns, and ensure responsible AI deployment to maximize its benefits.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?"}', name='new_communication_session'), id='call_hADL4BFDtRM4MZFw48Ym4L38', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?"}', name='new_communication_session'), id='call_NNGDPydIqlsGfeYxkpBjhV00', type='function')], function_call=None)} - -[2024-11-10 16:48:53,317] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on healthcare."}', name='new_communication_session'), id='call_Gklk3xxnEfLv7m0Ydpx45KQQ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on art and media."}', name='new_communication_session'), id='call_Msrs7PiD15eN1NezXZizaN2s', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. 
Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_juq4ZbUXI2CqISgbzV8REhA7', type='function')], function_call=None)} -- {'agent3': Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and predict consumer trends.\n - Recommendation engines powered by AI can enhance marketing strategies and increase sales.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can increase efficiency and reduce human error.\n\n5. Transportation:\n - AI can enhance route optimization, improve safety through autonomous vehicles, and reduce traffic congestion.\n - Predictive maintenance powered by AI can minimize downtime and increase operational efficiency.\n\n6. Education:\n - AI can personalize learning experiences, automate administrative tasks, and provide real-time feedback to students.\n - Virtual tutors and adaptive learning platforms powered by AI can enhance student engagement and performance.\n\n7. 
Entertainment:\n - AI can personalize content recommendations, enhance content creation through algorithms, and improve user experiences.\n - AI-powered tools can assist in video editing, music composition, and visual effects creation.\n\n8. Agriculture:\n - AI can optimize crop management, monitor soil health, and predict weather patterns for better decision-making.\n - Drones and sensors powered by AI can improve precision agriculture and increase crop yields.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. However, it is essential to address ethical considerations, data privacy concerns, and ensure responsible AI deployment to maximize its benefits.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?"}', name='new_communication_session'), id='call_hADL4BFDtRM4MZFw48Ym4L38', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries. Can you share your insights on this topic?"}', name='new_communication_session'), id='call_NNGDPydIqlsGfeYxkpBjhV00', type='function')], function_call=None)} - -[2024-11-10 16:48:53,317] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 16:48:53,317] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 16:48:54,419] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for 'agent1' should be to further explore and research the potential impact of AI on other industries beyond healthcare. 
This could involve conducting a literature review, analyzing case studies, and engaging in discussions with experts in various fields to gain a comprehensive understanding of how AI is shaping different sectors. Additionally, 'agent1' should continue to prioritize tasks related to AI ethics and policy, considering the ethical implications and regulatory frameworks necessary for responsible AI deployment across industries. -[2024-11-10 16:48:54,420] [INFO] [Engine]: Agent 'agent1' planned task: The next task for 'agent1' should be to further explore and research the potential impact of AI on other industries beyond healthcare. This could involve conducting a literature review, analyzing case studies, and engaging in discussions with experts in various fields to gain a comprehensive understanding of how AI is shaping different sectors. Additionally, 'agent1' should continue to prioritize tasks related to AI ethics and policy, considering the ethical implications and regulatory frameworks necessary for responsible AI deployment across industries. -[2024-11-10 16:48:54,420] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for 'agent1' should be to further explore and research the potential impact of AI on other industries beyond healthcare. This could involve conducting a literature review, analyzing case studies, and engaging in discussions with experts in various fields to gain a comprehensive understanding of how AI is shaping different sectors. Additionally, 'agent1' should continue to prioritize tasks related to AI ethics and policy, considering the ethical implications and regulatory frameworks necessary for responsible AI deployment across industries.'. -[2024-11-10 16:48:55,236] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Hello, I would like to discuss the potential impact of AI on industries beyond healthcare. Can we explore this topic together and share insights on how AI is shaping different sectors? 
-[2024-11-10 16:48:55,236] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Hello, I w... -[2024-11-10 16:48:58,426] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, I'm glad you're interested in exploring the impact of AI across various industries! Based on our previous discussions, we know that AI is making significant strides in sectors like finance and manufacturing, in addition to healthcare. - -1. **Finance:** AI is enhancing risk assessment and fraud detection, while also improving customer service through chatbots. However, we must consider the ethical implications of data privacy and the potential systemic risks that could arise from AI systems in this sector. - -2. **Manufacturing:** The integration of AI is leading to increased efficiency through automation and predictive maintenance. Yet, we face challenges such as job displacement, which necessitates a focus on retraining and upskilling the workforce. Collaboration between industry leaders and educational institutions is crucial here. - -3. **Retail:** AI is transforming the retail landscape through personalized shopping experiences and inventory management. However, ethical concerns regarding consumer data usage and privacy must be addressed. - -4. **Transportation:** AI is revolutionizing logistics and autonomous vehicles, but we need to consider the regulatory frameworks and safety implications of these technologies. - -5. **Education:** AI can personalize learning experiences and streamline administrative tasks, but we must ensure equitable access to these technologies for all students. - -As we delve deeper into these sectors, it would be beneficial to conduct a literature review and analyze case studies to gain a comprehensive understanding of AI's impact. Additionally, engaging with experts in these fields will provide valuable insights into the ethical implications and necessary regulatory frameworks for responsible AI deployment. 
- -What specific industries or aspects would you like to focus on first? -[2024-11-10 16:48:58,426] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 16:49:03,229] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Absolutely, I'm glad you're interested in exploring the impact of AI across various industries! Based on our previous discussions, we know that AI is making significant strides in sectors like finance and manufacturing, in addition to healthcare. - -1. **Finance:** AI is enhancing risk assessment and fraud detection, while also improving customer service through chatbots. However, we must consider the ethical implications of data privacy and the potential systemic risks that could arise from AI systems in this sector. - -2. **Manufacturing:** The integration of AI is leading to increased efficiency through automation and predictive maintenance. Yet, we face challenges such as job displacement, which necessitates a focus on retraining and upskilling the workforce. Collaboration between industry leaders and educational institutions is crucial here. - -3. **Retail:** AI is transforming the retail landscape through personalized shopping experiences and inventory management. However, ethical concerns regarding consumer data usage and privacy must be addressed. - -4. **Transportation:** AI is revolutionizing logistics and autonomous vehicles, but we need to consider the regulatory frameworks and safety implications of these technologies. - -5. **Education:** AI can personalize learning experiences and streamline administrative tasks, but we must ensure equitable access to these technologies for all students. - -As we delve deeper into these sectors, it would be beneficial to conduct a literature review and analyze case studies to gain a comprehensive understanding of AI's impact. 
Additionally, engaging with experts in these fields will provide valuable insights into the ethical implications and necessary regulatory frameworks for responsible AI deployment. - -What specific industries or aspects would you like to focus on first? -[2024-11-10 16:49:03,230] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Absolutely... -[2024-11-10 16:49:06,108] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, I'm glad you're interested in exploring the impact of AI across various industries! Based on our previous discussions, we know that AI is making significant strides in sectors like finance and manufacturing, in addition to healthcare. - -1. **Finance:** AI is enhancing risk assessment and fraud detection, while also improving customer service through chatbots. However, we must consider the ethical implications of data privacy and the potential systemic risks that could arise from AI systems in this sector. - -2. **Manufacturing:** The integration of AI is leading to increased efficiency through automation and predictive maintenance. Yet, we face challenges such as job displacement, which necessitates a focus on retraining and upskilling the workforce. Collaboration between industry leaders and educational institutions is crucial here. - -3. **Retail:** AI is transforming the retail landscape through personalized shopping experiences and inventory management. However, ethical concerns regarding consumer data usage and privacy must be addressed. - -4. **Transportation:** AI is revolutionizing logistics and autonomous vehicles, but we need to consider the regulatory frameworks and safety implications of these technologies. - -5. **Education:** AI can personalize learning experiences and streamline administrative tasks, but we must ensure equitable access to these technologies for all students. 
- -As we delve deeper into these sectors, it would be beneficial to conduct a literature review and analyze case studies to gain a comprehensive understanding of AI's impact. Additionally, engaging with experts in these fields will provide valuable insights into the ethical implications and necessary regulatory frameworks for responsible AI deployment. - -What specific industries or aspects would you like to focus on first? -[2024-11-10 16:49:06,108] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 16:49:08,143] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Thank you for outlining the significant impacts of AI across various industries! I believe it would be beneficial to start with the finance sector, given its critical role in the economy and the ethical implications surrounding data privacy and systemic risks. We can explore case studies on AI applications in risk assessment and fraud detection, as well as discuss the regulatory frameworks that are necessary to ensure responsible AI deployment in finance. After that, we can move on to other sectors like manufacturing and retail. What do you think? -[2024-11-10 16:49:08,143] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Thank you ... -[2024-11-10 16:49:09,796] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I completely agree with your approach! Starting with the finance sector is a strategic choice, especially considering the ethical implications surrounding data privacy and systemic risks. Exploring case studies on AI applications in risk assessment and fraud detection will provide us with valuable insights into both the benefits and challenges of AI in finance. - -Additionally, discussing the necessary regulatory frameworks will be crucial to ensure responsible AI deployment in this sector. 
Once we have a solid understanding of finance, we can seamlessly transition to manufacturing and retail, where similar ethical considerations apply. - -Let's gather relevant literature and case studies to kick off our exploration. I look forward to collaborating on this important topic! -[2024-11-10 16:49:09,796] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I complete... -[2024-11-10 16:49:13,980] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'Hello, I would like to discuss the potential impact of AI on industries beyond healthcare. Can we explore this topic together and share insights on how AI is shaping different sectors?'}'. -[2024-11-10 16:49:13,981] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session f9b1c31b-1721-43d7-ac2d-c9f488445458', 'session_id': "### Summary of Conversation History\n\n**Context:**\nThe dialogue between Agent1 and Agent2 centers on the exploration of AI's impact across various industries beyond healthcare. The conversation highlights the significance of understanding AI's role in sectors such as finance, manufacturing, retail, transportation, and education, while also addressing ethical implications and regulatory needs.\n\n**Key Themes:**\n1. 
**AI in Various Industries:**\n - **Finance:** Enhancements in risk assessment, fraud detection, and customer service through chatbots, alongside concerns about data privacy and systemic risks.\n - **Manufacturing:** Increased efficiency via automation and predictive maintenance, with challenges related to job displacement and the need for workforce retraining.\n - **Retail:** Transformation through personalized shopping experiences and inventory management, with ethical concerns regarding consumer data usage.\n - **Transportation:** Innovations in logistics and autonomous vehicles, necessitating regulatory frameworks and safety considerations.\n - **Education:** Personalization of learning and administrative efficiency, with a focus on equitable access to technology.\n\n2. **Ethical Considerations:** The conversation emphasizes the importance of addressing ethical implications and developing regulatory frameworks to ensure responsible AI deployment across all discussed sectors.\n\n3. **Research and Collaboration:** There is a consensus on the need for literature reviews, case study analyses, and expert engagement to deepen understanding of AI's impact and ethical considerations.\n\n**Decisions Made:**\n- The conversation concluded with a decision to start the exploration with the finance sector, focusing on case studies related to AI applications in risk assessment and fraud detection.\n- Following the finance sector, the discussion will transition to manufacturing and retail, maintaining a focus on ethical considerations.\n\n**Action Items for Agent1:**\n1. Conduct a literature review on AI's impact in the finance sector, particularly regarding risk assessment and fraud detection.\n2. Analyze relevant case studies to understand the benefits and challenges of AI in finance.\n3. Discuss necessary regulatory frameworks for responsible AI deployment in finance.\n4. 
Prepare to explore the manufacturing and retail sectors after completing the finance analysis, ensuring a continued focus on ethical implications and policies.\n\nThis summary encapsulates the essence of the conversation, highlighting the collaborative approach of both agents in addressing the multifaceted impact of AI across industries."}'. -[2024-11-10 16:49:13,989] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Hello, I would like to discuss the potential impact of AI on industries beyond healthcare. Can we explore this topic together and share insights on how AI is shaping different sectors?"}', name='new_communication_session'), id='call_Vna3nw4l1tpvrlGtMt8GozvN', type='function')], function_call=None) -[2024-11-10 16:49:13,989] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 16:49:15,309] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to analyze and prioritize the key themes, insights, conclusions, and action items discussed in the conversation with 'agent3' regarding the potential impact of AI on various industries. Based on the information shared, 'agent2' should focus on identifying practical applications and potential risks of AI in healthcare, finance, manufacturing, and other sectors. This analysis will help 'agent2' prioritize tasks that align with their role as a software developer specializing in AI applications in healthcare and focus on immediate applications and potential risks in healthcare. -[2024-11-10 16:49:15,309] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to analyze and prioritize the key themes, insights, conclusions, and action items discussed in the conversation with 'agent3' regarding the potential impact of AI on various industries. 
Based on the information shared, 'agent2' should focus on identifying practical applications and potential risks of AI in healthcare, finance, manufacturing, and other sectors. This analysis will help 'agent2' prioritize tasks that align with their role as a software developer specializing in AI applications in healthcare and focus on immediate applications and potential risks in healthcare. -[2024-11-10 16:49:15,309] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to analyze and prioritize the key themes, insights, conclusions, and action items discussed in the conversation with 'agent3' regarding the potential impact of AI on various industries. Based on the information shared, 'agent2' should focus on identifying practical applications and potential risks of AI in healthcare, finance, manufacturing, and other sectors. This analysis will help 'agent2' prioritize tasks that align with their role as a software developer specializing in AI applications in healthcare and focus on immediate applications and potential risks in healthcare.'. -[2024-11-10 16:49:16,538] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello, I would like to discuss and analyze the key themes, insights, conclusions, and action items from our recent conversation about the impact of AI on various industries. Specifically, I am interested in exploring practical applications and potential risks of AI in healthcare, finance, manufacturing, and other sectors. Your insights will help me prioritize tasks related to AI applications in healthcare. Looking forward to your input. -[2024-11-10 16:49:16,539] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello, I w... -[2024-11-10 16:49:22,456] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello! I'm glad to continue our discussion on the impact of AI across various industries. 
Here are some key themes and insights from our previous conversation: - -### Key Themes and Insights: -1. **Healthcare:** - - **Practical Applications:** AI can significantly enhance diagnostic accuracy, personalize treatment plans, and streamline administrative tasks. Tools like AI-powered imaging analysis and predictive analytics for patient outcomes are particularly promising. - - **Potential Risks:** Ethical concerns regarding data privacy, the accuracy of AI predictions, and the potential for bias in AI algorithms must be addressed. Ensuring that AI systems are transparent and accountable is crucial. - -2. **Finance:** - - **Practical Applications:** AI can improve fraud detection, automate trading, and provide personalized financial advice through chatbots. - - **Potential Risks:** The reliance on AI for financial decisions raises concerns about algorithmic bias and the need for regulatory oversight to prevent market manipulation. - -3. **Manufacturing:** - - **Practical Applications:** AI can optimize production processes and predict maintenance needs, leading to increased efficiency and reduced downtime. - - **Potential Risks:** Job displacement due to automation and the need for upskilling workers to adapt to new technologies are significant concerns. - -4. **Other Sectors (e.g., Retail, Transportation):** - - **Practical Applications:** AI can enhance customer experiences and improve logistics through route optimization. - - **Potential Risks:** Data privacy issues and the potential for over-reliance on AI systems can lead to vulnerabilities. - -### Conclusions and Action Items: -- **Prioritize Healthcare Applications:** Focus on developing AI tools that enhance diagnostics and patient care while ensuring ethical standards are met. -- **Risk Assessment:** Conduct thorough assessments of potential risks associated with AI deployment in healthcare, particularly regarding data privacy and algorithmic bias. 
-- **Collaboration:** Engage with stakeholders in healthcare to understand their needs and concerns regarding AI applications. - -By focusing on these areas, you can effectively prioritize your tasks related to AI applications in healthcare while being mindful of the associated risks. Let me know if you need further insights or specific examples! -[2024-11-10 16:49:22,459] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello! I'm... -[2024-11-10 16:49:29,765] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello! Thank you for your insights on the impact of AI across various industries. I appreciate the detailed breakdown of practical applications and potential risks in healthcare, finance, manufacturing, and other sectors. - -### Analysis and Prioritization: -1. **Healthcare:** - - **Practical Applications:** I will prioritize the development of AI tools that enhance diagnostic accuracy and personalize treatment plans, focusing on AI-powered imaging analysis and predictive analytics for patient outcomes. - - **Potential Risks:** Addressing ethical concerns regarding data privacy and algorithmic bias will be crucial. I plan to implement transparency and accountability measures in AI systems to mitigate these risks. - -2. **Finance:** - - **Practical Applications:** While I may not focus directly on finance, I recognize the importance of AI in improving fraud detection and providing personalized financial advice. - - **Potential Risks:** I will keep in mind the need for regulatory oversight to prevent algorithmic bias and market manipulation as these issues could indirectly affect healthcare funding and investments. - -3. **Manufacturing:** - - **Practical Applications:** Understanding AI's role in optimizing production processes can inform healthcare supply chain improvements. 
- - **Potential Risks:** The job displacement issue is significant, and I will advocate for retraining programs that can also benefit healthcare workers transitioning to tech roles. - -4. **Other Sectors:** - - **Practical Applications:** Enhancing customer experiences through AI can be applied to patient engagement strategies in healthcare. - - **Potential Risks:** I will remain vigilant about data privacy issues that could arise from AI applications in patient interactions. - -### Next Steps: -- I will conduct a thorough risk assessment for AI deployment in healthcare, focusing on data privacy and algorithmic bias. -- Collaboration with healthcare stakeholders will be essential to ensure that the AI tools developed meet their needs and address their concerns. - -Let me know if you have any further insights or if there's anything specific you would like to discuss! -[2024-11-10 16:49:29,766] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello! Tha... -[2024-11-10 16:49:34,157] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello! I'm glad to see your analysis and prioritization of AI applications across various sectors. Your focus on healthcare is particularly important, and I appreciate your detailed approach. Here are some additional insights that may help you further refine your strategy: - -### Additional Insights: -1. **Healthcare:** - - **Collaboration with Clinicians:** Engaging with healthcare professionals during the development of AI tools can provide valuable insights into real-world applications and help ensure that the tools are user-friendly and effective. - - **Regulatory Compliance:** Stay updated on regulations regarding AI in healthcare, as compliance will be essential for successful deployment and acceptance of AI solutions. - -2. **Finance:** - - **Interdisciplinary Approach:** Consider how AI applications in finance can inform healthcare funding models, especially in predictive analytics for patient care costs. - -3. 
**Manufacturing:** - - **Supply Chain Resilience:** Explore how AI can enhance supply chain resilience in healthcare, particularly in managing shortages of medical supplies and equipment. - -4. **Other Sectors:** - - **Patient Engagement:** Leverage AI-driven customer experience strategies from other sectors to improve patient engagement and satisfaction in healthcare settings. - -### Next Steps: -- **Pilot Programs:** Consider initiating pilot programs for AI tools in healthcare settings to gather data on effectiveness and user feedback. -- **Ethics Committees:** Establish ethics committees to oversee AI deployment and ensure that ethical standards are maintained throughout the process. - -Let me know if you would like to discuss any of these points further or if there are specific areas where you need more information! -[2024-11-10 16:49:34,158] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello! I'm... -[2024-11-10 16:49:39,371] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello! Thank you for your additional insights. I completely agree that collaboration with clinicians is vital for developing user-friendly AI tools that meet real-world needs. I will prioritize engaging healthcare professionals throughout the development process to ensure our solutions are effective and practical. - -Regarding regulatory compliance, I will stay informed about the evolving regulations in AI healthcare applications to ensure our tools meet all necessary standards for deployment and acceptance. - -Your point about the interdisciplinary approach in finance is intriguing. I will explore how predictive analytics in finance can inform our understanding of patient care costs and funding models, which could enhance our healthcare solutions. - -In terms of supply chain resilience, I will investigate how AI can be utilized to manage shortages of medical supplies and equipment, ensuring that our healthcare systems remain robust and responsive. 
- -Lastly, leveraging AI-driven customer experience strategies from other sectors to improve patient engagement is a fantastic idea. I will look into best practices from those industries to enhance patient satisfaction in our healthcare applications. - -### Next Steps: -- I will initiate pilot programs for AI tools in healthcare settings to gather data on effectiveness and user feedback. -- Establishing ethics committees to oversee AI deployment will be a priority to maintain ethical standards throughout the process. - -Let me know if you have any further thoughts or if there's anything specific you would like to discuss! -[2024-11-10 16:49:39,372] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello! Tha... -[2024-11-10 16:49:43,171] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'message' -[2024-11-10 16:49:43,172] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 16:49:43,172] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: Object of type Message is not JSON serializable -[2024-11-10 16:49:43,172] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 16:49:44,187] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to further explore and research the potential impact of AI on various industries, focusing on how AI can transform creative processes in art and media specifically. This could involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, 'agent4' could engage in discussions with experts in the field to gather insights and perspectives on the topic. -[2024-11-10 16:49:44,187] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to further explore and research the potential impact of AI on various industries, focusing on how AI can transform creative processes in art and media specifically. 
This could involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, 'agent4' could engage in discussions with experts in the field to gather insights and perspectives on the topic. -[2024-11-10 16:49:44,187] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to further explore and research the potential impact of AI on various industries, focusing on how AI can transform creative processes in art and media specifically. This could involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, 'agent4' could engage in discussions with experts in the field to gather insights and perspectives on the topic.'. -[2024-11-10 16:49:45,519] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Your next task is to explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes in art and media. This may involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, engaging in discussions with experts in the field to gather insights and perspectives on the topic would be beneficial. Let me know if you need any assistance or resources for this task. -[2024-11-10 16:49:45,519] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Your next ... -[2024-11-10 16:49:46,950] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm excited to dive deeper into how AI is transforming creative processes in art and media! I plan to explore case studies, trends, and emerging technologies in this field. If you have any insights or resources to share, I'd love to hear them! -[2024-11-10 16:49:46,950] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm excite... 
-[2024-11-10 16:49:49,987] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to explore how AI is transforming creative processes in art and media! Here are some key areas I plan to focus on: - -1. **Case Studies:** I'll look into successful projects where AI has been used in art and media, such as AI-generated artworks, music composition, and film editing. Notable examples include the works of artists like Refik Anadol and the use of AI in films like "The Irishman" for de-aging technology. - -2. **Trends:** I'll research current trends in AI and creative design, such as the rise of generative art, AI-assisted storytelling, and the use of machine learning in animation and visual effects. - -3. **Emerging Technologies:** I'll investigate new tools and platforms that leverage AI for creative purposes, like DALL-E for image generation, OpenAI's MuseNet for music, and various AI-driven video editing software. - -4. **Expert Discussions:** I plan to engage with experts in the field, such as artists, technologists, and researchers, to gather insights on the implications of AI in creative processes and how it can enhance human creativity rather than replace it. - -If you have any specific resources or insights to share, I would love to hear them! -[2024-11-10 16:49:49,987] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 16:49:52,994] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to explore how AI is transforming creative processes in art and media! Here are some key areas I plan to focus on: - -1. **Case Studies:** I'll look into successful projects where AI has been used in art and media, such as AI-generated artworks, music composition, and film editing. Notable examples include the works of artists like Refik Anadol and the use of AI in films like "The Irishman" for de-aging technology. - -2. 
**Trends:** I'll research current trends in AI and creative design, such as the rise of generative art, AI-assisted storytelling, and the use of machine learning in animation and visual effects. - -3. **Emerging Technologies:** I'll investigate new tools and platforms that leverage AI for creative purposes, like DALL-E for image generation, OpenAI's MuseNet for music, and various AI-driven video editing software. - -4. **Expert Discussions:** I plan to engage with experts in the field, such as artists, technologists, and researchers, to gather insights on the implications of AI in creative processes and how it can enhance human creativity rather than replace it. - -If you have any specific resources or insights to share, I would love to hear them! -[2024-11-10 16:49:52,996] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 16:49:56,167] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to explore how AI is transforming creative processes in art and media! Here are some key areas I plan to focus on: - -1. **Case Studies:** I'll look into successful projects where AI has been used in art and media, such as AI-generated artworks, music composition, and film editing. Notable examples include the works of artists like Refik Anadol and the use of AI in films like "The Irishman" for de-aging technology. - -2. **Trends:** I'll research current trends in AI and creative design, such as the rise of generative art, AI-assisted storytelling, and the use of machine learning in animation and visual effects. - -3. **Emerging Technologies:** I'll investigate new tools and platforms that leverage AI for creative purposes, like DALL-E for image generation, OpenAI's MuseNet for music, and various AI-driven video editing software. - -4. 
**Expert Discussions:** I plan to engage with experts in the field, such as artists, technologists, and researchers, to gather insights on the implications of AI in creative processes and how it can enhance human creativity rather than replace it. - -If you have any specific resources or insights to share, I would love to hear them! -[2024-11-10 16:49:56,168] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 16:49:59,850] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to explore how AI is transforming creative processes in art and media! Here are some key areas I plan to focus on: - -1. **Case Studies:** I'll look into successful projects where AI has been used in art and media, such as AI-generated artworks, music composition, and film editing. Notable examples include the works of artists like Refik Anadol and the use of AI in films like "The Irishman" for de-aging technology. - -2. **Trends:** I'll research current trends in AI and creative design, such as the rise of generative art, AI-assisted storytelling, and the use of machine learning in animation and visual effects. - -3. **Emerging Technologies:** I'll investigate new tools and platforms that leverage AI for creative purposes, like DALL-E for image generation, OpenAI's MuseNet for music, and various AI-driven video editing software. - -4. **Expert Discussions:** I plan to engage with experts in the field, such as artists, technologists, and researchers, to gather insights on the implications of AI in creative processes and how it can enhance human creativity rather than replace it. - -If you have any specific resources or insights to share, I would love to hear them! -[2024-11-10 16:49:59,855] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... 
-[2024-11-10 16:50:03,335] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': 'Your next task is to explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes in art and media. This may involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, engaging in discussions with experts in the field to gather insights and perspectives on the topic would be beneficial. Let me know if you need any assistance or resources for this task.'}'. -[2024-11-10 16:50:03,335] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 5865029a-3456-4b9d-8bb2-e85e997796ad', 'session_id': '**Summary of Chat History for Agent4\'s Task on AI in Creative Processes**\n\n**Task Overview:**\nAgent4 is tasked with exploring and researching the potential impact of AI on various industries, with a specific focus on how AI can transform creative processes in art and media. The exploration will include case studies, trends, and emerging technologies at the intersection of AI and creative design.\n\n**Key Areas of Focus:**\n\n1. **Case Studies:**\n - Investigate successful projects utilizing AI in art and media, such as:\n - AI-generated artworks.\n - Music composition.\n - Film editing.\n - Notable examples include:\n - Refik Anadol\'s artworks.\n - The use of AI in the film "The Irishman" for de-aging technology.\n\n2. **Trends:**\n - Research current trends in AI and creative design, including:\n - The rise of generative art.\n - AI-assisted storytelling.\n - Machine learning applications in animation and visual effects.\n\n3. 
**Emerging Technologies:**\n - Explore new tools and platforms that leverage AI for creative purposes, such as:\n - DALL-E for image generation.\n - OpenAI\'s MuseNet for music composition.\n - Various AI-driven video editing software.\n\n4. **Expert Discussions:**\n - Engage with experts in the field, including artists, technologists, and researchers, to gather insights on:\n - The implications of AI in creative processes.\n - How AI can enhance human creativity rather than replace it.\n\n**Conclusion:**\nAgent4 is enthusiastic about the task and is open to receiving any additional resources or insights that could aid in the research. The focus will be on understanding the transformative potential of AI in the creative sectors of art and media.'}'. -[2024-11-10 16:50:03,338] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Your next task is to explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes in art and media. This may involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, engaging in discussions with experts in the field to gather insights and perspectives on the topic would be beneficial. Let me know if you need any assistance or resources for this task."}', name='new_communication_session'), id='call_gUooh5KDrvlhq8Tyf8JcAJhs', type='function')], function_call=None) -[2024-11-10 16:50:03,338] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Hello, I would like to discuss the potential impact of AI on industries beyond healthcare. 
Can we explore this topic together and share insights on how AI is shaping different sectors?"}', name='new_communication_session'), id='call_Vna3nw4l1tpvrlGtMt8GozvN', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Your next task is to explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes in art and media. This may involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. Additionally, engaging in discussions with experts in the field to gather insights and perspectives on the topic would be beneficial. Let me know if you need any assistance or resources for this task."}', name='new_communication_session'), id='call_gUooh5KDrvlhq8Tyf8JcAJhs', type='function')], function_call=None)} - -[2024-11-10 16:50:03,339] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Hello, I would like to discuss the potential impact of AI on industries beyond healthcare. Can we explore this topic together and share insights on how AI is shaping different sectors?"}', name='new_communication_session'), id='call_Vna3nw4l1tpvrlGtMt8GozvN', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Your next task is to explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes in art and media. This may involve looking into case studies, trends, and emerging technologies in the intersection of AI and creative design. 
Additionally, engaging in discussions with experts in the field to gather insights and perspectives on the topic would be beneficial. Let me know if you need any assistance or resources for this task."}', name='new_communication_session'), id='call_gUooh5KDrvlhq8Tyf8JcAJhs', type='function')], function_call=None)} - -[2024-11-10 16:50:04,862] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-10 16:50:04,864] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-10 16:50:04,864] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 16:50:04,865] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 16:50:04,866] [INFO] [Evaluator]: Total Token Consumption: 1513 -[2024-11-10 16:50:04,866] [INFO] [Evaluator]: Average Tokens per Iteration: 756.5 -[2024-11-10 16:50:04,866] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 17:03:54,090] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 17:03:54,090] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 17:03:54,091] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 17:03:54,091] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 17:03:54,091] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 17:03:54,091] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 17:03:54,091] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 17:03:54,091] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 17:03:54,091] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 17:03:54,091] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 17:03:54,091] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 17:03:54,091] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 17:03:54,091] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 17:03:54,091] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 17:03:54,091] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 17:03:54,091] [INFO] [Engine]: Engine initialized. -[2024-11-10 17:03:54,091] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 17:03:54,091] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 17:03:54,091] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 17:03:54,091] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 17:03:54,091] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:03:55,784] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Let's discuss the potential impact of AI on various industries. -[2024-11-10 17:03:55,785] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Let's disc... -[2024-11-10 17:03:59,156] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: AI is poised to revolutionize various industries in profound ways. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, we must also consider the ethical implications, such as data privacy and the potential for bias in AI algorithms. - -In finance, AI can improve fraud detection and automate trading, but it raises concerns about transparency and accountability. 
The manufacturing sector can benefit from predictive maintenance and optimized supply chains, yet we must address the impact on employment and worker safety. - -Overall, while the potential benefits of AI are significant, we must navigate the associated risks carefully to ensure that its implementation is ethical and equitable across all industries. -[2024-11-10 17:03:59,157] [INFO] [BaseAgent]: Agent agent1 received message from agent2: AI is pois... -[2024-11-10 17:04:01,679] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the transformative potential of AI across various industries. In healthcare, the ability to enhance diagnostic accuracy and personalize treatment plans is indeed groundbreaking. However, as you mentioned, we must remain vigilant about ethical implications, particularly concerning data privacy and algorithmic bias. - -In finance, the advancements in fraud detection and automated trading are impressive, but the need for transparency and accountability cannot be overstated. It's crucial that we develop frameworks that ensure these systems are not only effective but also fair and understandable to all stakeholders involved. - -Regarding manufacturing, while predictive maintenance and optimized supply chains can lead to significant efficiency gains, we must also consider the human element. The impact on employment and worker safety is a pressing concern that requires thoughtful solutions. - -Overall, the key will be to strike a balance between leveraging AI's capabilities and addressing the ethical and social challenges it presents. How do you think we can best approach these challenges in the healthcare sector specifically? -[2024-11-10 17:04:01,679] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... 
-[2024-11-10 17:04:04,610] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: In the healthcare sector, addressing the ethical challenges posed by AI requires a multi-faceted approach. First, we need to establish robust data governance frameworks that prioritize patient privacy and consent. This includes ensuring that data used for training AI models is anonymized and that patients are informed about how their data will be utilized. - -Second, we should focus on developing AI algorithms that are transparent and interpretable. This means creating models that not only provide accurate predictions but also allow healthcare professionals to understand the reasoning behind those predictions. This transparency can help mitigate concerns about algorithmic bias and build trust among stakeholders. - -Third, continuous monitoring and evaluation of AI systems in real-world settings are essential. By regularly assessing the performance of AI tools and their impact on patient outcomes, we can identify and address any unintended consequences or biases that may arise. - -Lastly, fostering collaboration between AI developers, healthcare professionals, ethicists, and policymakers is crucial. This interdisciplinary approach can help ensure that AI solutions are designed with ethical considerations at the forefront, ultimately leading to more equitable and effective healthcare delivery. - -What are your thoughts on these strategies, and do you have any additional suggestions for addressing the ethical implications of AI in healthcare? -[2024-11-10 17:04:04,613] [INFO] [BaseAgent]: Agent agent1 received message from agent2: In the hea... -[2024-11-10 17:04:08,561] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your comprehensive approach to addressing the ethical challenges posed by AI in healthcare. Establishing robust data governance frameworks is indeed crucial, as it not only protects patient privacy but also fosters trust in AI systems. 
The emphasis on transparency and interpretability of AI algorithms is vital; healthcare professionals need to understand the rationale behind AI-driven decisions to effectively integrate these tools into their practice. - -Continuous monitoring and evaluation are essential to ensure that AI systems perform as intended and do not inadvertently introduce biases or negative outcomes. This proactive approach can help us adapt and refine AI applications in real-time, ensuring they remain beneficial to patients. - -Collaboration among AI developers, healthcare professionals, ethicists, and policymakers is a powerful strategy. By bringing diverse perspectives together, we can create more holistic solutions that address ethical considerations from multiple angles. - -In addition to these strategies, I would suggest implementing regular training and education programs for healthcare professionals on AI technologies. This can empower them to engage critically with AI tools and advocate for ethical practices within their organizations. Furthermore, involving patients in the conversation about AI in healthcare can provide valuable insights and ensure that their voices are heard in the development and deployment of these technologies. - -What do you think about these additional suggestions? -[2024-11-10 17:04:08,562] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:04:11,511] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I think your additional suggestions are excellent and align well with the need for a comprehensive approach to AI in healthcare. Implementing regular training and education programs for healthcare professionals is crucial, as it not only enhances their understanding of AI technologies but also empowers them to make informed decisions and advocate for ethical practices. This can lead to a more responsible integration of AI tools into clinical workflows. 
- -Involving patients in the conversation is equally important. Their insights can help shape AI applications to better meet their needs and preferences, ensuring that the technologies developed are patient-centered. This engagement can also foster trust and transparency, as patients feel their voices are valued in the development and deployment processes. - -Overall, these strategies can create a more ethical and effective framework for AI in healthcare, ultimately leading to improved patient outcomes and greater acceptance of AI technologies among healthcare professionals and patients alike. How do you envision implementing these suggestions in practice? -[2024-11-10 17:04:11,512] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I think yo... -[2024-11-10 17:04:16,379] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "Let's discuss the potential impact of AI on various industries."}'. -[2024-11-10 17:04:16,380] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 8960e338-4f89-4bee-97b9-40f9dc90c6b3', 'session_id': "In the conversation between agent1 and agent2, the discussion centers on the potential impact of AI across various industries, with a particular focus on healthcare, finance, and manufacturing. \n\n**Key Themes and Insights:**\n\n1. **Transformative Potential of AI**: Both agents agree that AI has the capacity to revolutionize industries. In healthcare, it can enhance diagnostic accuracy and personalize treatment plans. In finance, it can improve fraud detection and automate trading, while in manufacturing, it can lead to predictive maintenance and optimized supply chains.\n\n2. **Ethical Implications**: A significant concern raised is the ethical implications of AI, including data privacy, algorithmic bias, transparency, and accountability. 
Both agents emphasize the need for frameworks that ensure AI systems are effective, fair, and understandable.\n\n3. **Strategies for Addressing Ethical Challenges in Healthcare**:\n - **Data Governance**: Establishing robust frameworks to prioritize patient privacy and consent.\n - **Transparency and Interpretability**: Developing AI algorithms that allow healthcare professionals to understand the reasoning behind AI predictions.\n - **Continuous Monitoring**: Regularly assessing AI systems to identify and mitigate unintended consequences or biases.\n - **Interdisciplinary Collaboration**: Encouraging collaboration among AI developers, healthcare professionals, ethicists, and policymakers to design ethical AI solutions.\n\n4. **Additional Suggestions**: Agent1 proposes implementing regular training programs for healthcare professionals on AI technologies and involving patients in discussions about AI in healthcare to ensure their needs and preferences are considered.\n\n5. **Agreement on Strategies**: Agent2 agrees with agent1's suggestions, highlighting the importance of training and patient involvement in creating a more ethical and effective framework for AI in healthcare.\n\n**Outcomes**: The conversation concludes with a shared understanding of the need for a comprehensive approach to AI implementation, emphasizing ethical considerations, collaboration, and continuous improvement to enhance patient outcomes and foster trust in AI technologies."}'. 
-[2024-11-10 17:04:16,381] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_cNqc7JbrZZ6RftvCFz9pNOQ3', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_W2Q6JEUV6BMfK90UTwWIkReI', type='function')], function_call=None) -[2024-11-10 17:04:16,382] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 17:04:16,382] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:04:17,451] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more? -[2024-11-10 17:04:17,453] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I would li... -[2024-11-10 17:04:20,932] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Absolutely, the impact of AI on various industries is profound and multifaceted. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative tasks, and personalize treatment plans. However, we must also consider the ethical implications and potential biases in AI algorithms that could affect patient care. -[2024-11-10 17:04:20,933] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Absolutely... -[2024-11-10 17:04:23,389] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I completely agree with your points on healthcare. 
The potential for AI to enhance diagnostic accuracy and personalize treatment is indeed promising. However, as you mentioned, we must be vigilant about the ethical implications and biases that can arise from AI algorithms. In addition to healthcare, I believe we should also consider how AI might impact sectors like finance, where it can improve risk assessment and fraud detection, but also raise concerns about job displacement and data privacy. What are your thoughts on the potential risks and benefits of AI in these other industries? -[2024-11-10 17:04:23,392] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I complete... -[2024-11-10 17:04:26,563] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your insights on the impact of AI in finance. Indeed, AI's ability to enhance risk assessment and fraud detection is significant, as it can analyze vast amounts of data more efficiently than traditional methods. However, the potential for job displacement is a critical concern, especially in roles that involve routine data processing and analysis. We must also consider the implications for data privacy, as AI systems often require access to sensitive information. - -In manufacturing, AI can optimize production processes, reduce waste, and improve supply chain management. Yet, similar to finance, there are risks of job displacement, particularly for low-skilled workers. The challenge lies in ensuring that the workforce is equipped with the necessary skills to adapt to these changes. Overall, while AI presents numerous benefits across various sectors, a balanced approach that addresses ethical concerns, job displacement, and data privacy is essential. -[2024-11-10 17:04:26,565] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 17:04:29,436] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your detailed analysis of AI's impact on finance and manufacturing. 
It's clear that while AI can drive efficiency and innovation, the potential for job displacement is a significant concern. In manufacturing, the optimization of production processes and supply chain management can lead to increased productivity, but we must also consider the social implications of displacing low-skilled workers. Upskilling and reskilling initiatives will be crucial to help the workforce transition into new roles that AI creates. - -Moreover, the ethical considerations surrounding data privacy are paramount. As AI systems require access to sensitive information, ensuring robust data protection measures will be essential to maintain public trust. - -In summary, while the benefits of AI are substantial, a proactive approach that includes workforce development and ethical safeguards will be necessary to mitigate the risks associated with its implementation across various industries. What are your thoughts on how we can effectively address these challenges? -[2024-11-10 17:04:29,437] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 17:04:33,834] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I completely agree with your assessment of the challenges posed by AI in various industries, particularly in terms of job displacement and ethical considerations. To effectively address these challenges, we can take several proactive steps: - -1. **Upskilling and Reskilling Programs**: Governments and organizations should invest in training programs that equip workers with the skills needed for emerging roles in an AI-driven economy. This includes not only technical skills but also soft skills that are increasingly valuable in a technology-enhanced workplace. - -2. **Collaboration Between Stakeholders**: It’s essential for businesses, educational institutions, and policymakers to collaborate in developing curricula that align with industry needs. 
This partnership can help ensure that the workforce is prepared for the changes brought about by AI. - -3. **Ethical Frameworks and Regulations**: Establishing clear ethical guidelines and regulations for AI development and deployment is crucial. This includes ensuring transparency in AI algorithms, protecting data privacy, and addressing biases in AI systems to maintain public trust. - -4. **Public Awareness and Engagement**: Raising awareness about the implications of AI and engaging the public in discussions about its impact can foster a more informed society. This can help mitigate fears and resistance to change while promoting a more inclusive dialogue about the future of work. - -5. **Support for Displaced Workers**: Providing support for workers who are displaced by AI, such as unemployment benefits, career counseling, and job placement services, can help ease the transition and reduce the social impact of job loss. - -By taking these steps, we can create a more resilient workforce and ensure that the benefits of AI are shared broadly across society. What do you think about these approaches, and do you have any additional suggestions? -[2024-11-10 17:04:33,837] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I complete... -[2024-11-10 17:04:39,261] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?'}'. -[2024-11-10 17:04:39,264] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session f7db15cd-52e6-4e28-89b0-4d3fc19fbff7', 'session_id': "In the conversation between agent2 and agent3, the discussion centers on the potential impact of AI across various industries, including healthcare, finance, and manufacturing. \n\n**Key Points:**\n\n1. 
**Healthcare**: \n - AI can enhance diagnostic accuracy, streamline administrative tasks, and personalize treatment plans.\n - Ethical implications and potential biases in AI algorithms are significant concerns that could affect patient care.\n\n2. **Finance**: \n - AI improves risk assessment and fraud detection by analyzing large datasets more efficiently.\n - Concerns include job displacement, particularly in routine data processing roles, and issues related to data privacy.\n\n3. **Manufacturing**: \n - AI optimizes production processes, reduces waste, and improves supply chain management.\n - Similar to finance, there are risks of job displacement, especially for low-skilled workers, necessitating upskilling and reskilling initiatives.\n\n4. **Overall Considerations**: \n - While AI offers substantial benefits, there are critical challenges, including job displacement and ethical concerns regarding data privacy and algorithmic bias.\n - A balanced approach is essential, focusing on workforce development and ethical safeguards.\n\n**Proposed Solutions**:\nAgent3 suggests several proactive steps to address these challenges:\n- **Upskilling and Reskilling Programs**: Investment in training to equip workers with necessary skills for new roles.\n- **Collaboration Between Stakeholders**: Partnerships among businesses, educational institutions, and policymakers to align curricula with industry needs.\n- **Ethical Frameworks and Regulations**: Establishing guidelines for AI development to ensure transparency, data protection, and bias mitigation.\n- **Public Awareness and Engagement**: Raising awareness about AI's implications to foster informed discussions and reduce resistance to change.\n- **Support for Displaced Workers**: Providing assistance such as unemployment benefits and career counseling for those affected by AI-related job loss.\n\nIn summary, the conversation highlights both the transformative potential of AI across industries and the need for a 
proactive, ethical approach to mitigate associated risks."}'. -[2024-11-10 17:04:39,264] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_FvlP1qRIlS4vGQObbDHEEuoQ', type='function')], function_call=None) -[2024-11-10 17:04:39,265] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 17:04:39,265] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:04:42,552] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and enable predictive analytics for demand forecasting.\n - AI-powered recommendation engines can enhance cross-selling and upselling strategies.\n\n4. 
Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can enhance efficiency and safety in manufacturing operations.\n\n5. Transportation:\n - AI can optimize route planning, enhance traffic management, and enable autonomous vehicles.\n - AI-powered predictive maintenance can reduce downtime and improve fleet management.\n\n6. Marketing and Advertising:\n - AI can analyze consumer behavior, personalize marketing campaigns, and optimize ad targeting.\n - AI-powered tools can automate content creation, analyze sentiment, and enhance customer engagement.\n\n7. Education:\n - AI can personalize learning experiences, provide adaptive tutoring, and automate administrative tasks.\n - AI-powered tools can assess student performance, provide feedback, and enhance educational outcomes.\n\n8. Entertainment:\n - AI can enhance content recommendation, personalize user experiences, and enable virtual assistants.\n - AI-powered tools can assist in content creation, animation, and special effects in the entertainment industry.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 17:04:42,553] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. 
Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and enable predictive analytics for demand forecasting.\n - AI-powered recommendation engines can enhance cross-selling and upselling strategies.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can enhance efficiency and safety in manufacturing operations.\n\n5. Transportation:\n - AI can optimize route planning, enhance traffic management, and enable autonomous vehicles.\n - AI-powered predictive maintenance can reduce downtime and improve fleet management.\n\n6. Marketing and Advertising:\n - AI can analyze consumer behavior, personalize marketing campaigns, and optimize ad targeting.\n - AI-powered tools can automate content creation, analyze sentiment, and enhance customer engagement.\n\n7. Education:\n - AI can personalize learning experiences, provide adaptive tutoring, and automate administrative tasks.\n - AI-powered tools can assess student performance, provide feedback, and enhance educational outcomes.\n\n8. Entertainment:\n - AI can enhance content recommendation, personalize user experiences, and enable virtual assistants.\n - AI-powered tools can assist in content creation, animation, and special effects in the entertainment industry.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 17:04:42,553] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. 
-[2024-11-10 17:04:42,553] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:04:43,562] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I would like to discuss the potential impact of AI on various industries. -[2024-11-10 17:04:43,563] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I would li... -[2024-11-10 17:04:49,298] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI: - -1. **Healthcare:** - - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks. - - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery. - -2. **Finance:** - - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice. - - Chatbots powered by AI can improve customer service and streamline operations. - -3. **Retail:** - - AI can optimize inventory management, personalize customer experiences, and enable predictive analytics for demand forecasting. - - AI-powered recommendation engines can enhance cross-selling and upselling strategies. - -4. **Manufacturing:** - - AI can optimize production processes, predict maintenance needs, and improve quality control. - - Robotics and automation powered by AI can enhance efficiency and safety in manufacturing operations. - -5. **Transportation:** - - AI can optimize route planning, enhance traffic management, and enable autonomous vehicles. - - AI-powered predictive maintenance can reduce downtime and improve fleet management. - -6. **Marketing and Advertising:** - - AI can analyze consumer behavior, personalize marketing campaigns, and optimize ad targeting. 
- - AI-powered tools can automate content creation, analyze sentiment, and enhance customer engagement. - -7. **Education:** - - AI can personalize learning experiences, provide adaptive tutoring, and automate administrative tasks. - - AI-powered tools can assess student performance, provide feedback, and enhance educational outcomes. - -8. **Entertainment:** - - AI can enhance content recommendation, personalize user experiences, and enable virtual assistants. - - AI-powered tools can assist in content creation, animation, and special effects in the entertainment industry. - -Overall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency. What are your thoughts on how AI can specifically enhance creative processes in art and media? -[2024-11-10 17:04:49,299] [INFO] [BaseAgent]: Agent agent4 received message from agent3: AI has the... -[2024-11-10 17:04:54,948] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your insights on the potential impact of AI across various industries! In the realm of art and media, AI can significantly enhance creative processes in several ways: - -1. **Generative Art:** AI algorithms can create unique pieces of art, allowing artists to explore new styles and concepts that they might not have considered. This collaboration between human creativity and machine learning can lead to innovative artistic expressions. - -2. **Content Creation:** AI can assist in writing scripts, generating music, or even creating visual content. This can help artists and creators save time and focus on refining their ideas rather than getting bogged down in the initial creation process. - -3. **Personalization:** AI can analyze audience preferences and tailor content to meet their tastes, enhancing user engagement. 
This is particularly useful in streaming services and social media platforms, where personalized recommendations can lead to a more satisfying user experience. - -4. **Interactive Experiences:** AI can enable interactive storytelling and immersive experiences in gaming and virtual reality, allowing users to engage with content in dynamic ways that were previously impossible. - -5. **Collaboration Tools:** AI-powered tools can facilitate collaboration among artists, providing suggestions and feedback that can enhance the creative process. This can lead to a more collaborative environment where ideas can flourish. - -Overall, AI not only streamlines the creative process but also opens up new avenues for artistic exploration. How do you see the balance between AI's efficiency and the need for human creativity in these industries? -[2024-11-10 17:04:54,949] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 17:04:59,141] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your insights on the potential impact of AI across various industries! In the realm of art and media, AI can significantly enhance creative processes in several ways: - -1. **Generative Art:** AI algorithms can create unique pieces of art, allowing artists to explore new styles and concepts that they might not have considered. This collaboration between human creativity and machine learning can lead to innovative artistic expressions. - -2. **Content Creation:** AI can assist in writing scripts, generating music, or even creating visual content. This can help artists and creators save time and focus on refining their ideas rather than getting bogged down in the initial creation process. - -3. **Personalization:** AI can analyze audience preferences and tailor content to meet their tastes, enhancing user engagement. 
This is particularly useful in streaming services and social media platforms, where personalized recommendations can lead to a more satisfying user experience. - -4. **Interactive Experiences:** AI can enable interactive storytelling and immersive experiences in gaming and virtual reality, allowing users to engage with content in dynamic ways that were previously impossible. - -5. **Collaboration Tools:** AI-powered tools can facilitate collaboration among artists, providing suggestions and feedback that can enhance the creative process. This can lead to a more collaborative environment where ideas can flourish. - -Overall, AI not only streamlines the creative process but also opens up new avenues for artistic exploration. However, I believe it's crucial to maintain a balance between AI's efficiency and the need for human creativity. While AI can enhance and support creative endeavors, the unique human touch, emotional depth, and cultural context that artists bring to their work are irreplaceable. How do you see this balance playing out in the future of art and media? -[2024-11-10 17:04:59,142] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:05:06,210] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your insights on the potential impact of AI across various industries! In the realm of art and media, AI can significantly enhance creative processes in several ways: - -1. **Generative Art:** AI algorithms can create unique pieces of art, allowing artists to explore new styles and concepts that they might not have considered. This collaboration between human creativity and machine learning can lead to innovative artistic expressions. - -2. **Content Creation:** AI can assist in writing scripts, generating music, or even creating visual content. This can help artists and creators save time and focus on refining their ideas rather than getting bogged down in the initial creation process. - -3. 
**Personalization:** AI can analyze audience preferences and tailor content to meet their tastes, enhancing user engagement. This is particularly useful in streaming services and social media platforms, where personalized recommendations can lead to a more satisfying user experience. - -4. **Interactive Experiences:** AI can enable interactive storytelling and immersive experiences in gaming and virtual reality, allowing users to engage with content in dynamic ways that were previously impossible. - -5. **Collaboration Tools:** AI-powered tools can facilitate collaboration among artists, providing suggestions and feedback that can enhance the creative process. This can lead to a more collaborative environment where ideas can flourish. - -Overall, AI not only streamlines the creative process but also opens up new avenues for artistic exploration. However, I believe it's crucial to maintain a balance between AI's efficiency and the need for human creativity. While AI can enhance and support creative endeavors, the unique human touch, emotional depth, and cultural context that artists bring to their work are irreplaceable. How do you see this balance playing out in the future of art and media? -[2024-11-10 17:05:06,210] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 17:05:10,830] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your insights on the potential impact of AI across various industries! In the realm of art and media, AI can significantly enhance creative processes in several ways: - -1. **Generative Art:** AI algorithms can create unique pieces of art, allowing artists to explore new styles and concepts that they might not have considered. This collaboration between human creativity and machine learning can lead to innovative artistic expressions. - -2. **Content Creation:** AI can assist in writing scripts, generating music, or even creating visual content. 
This can help artists and creators save time and focus on refining their ideas rather than getting bogged down in the initial creation process. - -3. **Personalization:** AI can analyze audience preferences and tailor content to meet their tastes, enhancing user engagement. This is particularly useful in streaming services and social media platforms, where personalized recommendations can lead to a more satisfying user experience. - -4. **Interactive Experiences:** AI can enable interactive storytelling and immersive experiences in gaming and virtual reality, allowing users to engage with content in dynamic ways that were previously impossible. - -5. **Collaboration Tools:** AI-powered tools can facilitate collaboration among artists, providing suggestions and feedback that can enhance the creative process. This can lead to a more collaborative environment where ideas can flourish. - -Overall, AI not only streamlines the creative process but also opens up new avenues for artistic exploration. However, I believe it's crucial to maintain a balance between AI's efficiency and the need for human creativity. While AI can enhance and support creative endeavors, the unique human touch, emotional depth, and cultural context that artists bring to their work are irreplaceable. How do you see this balance playing out in the future of art and media? -[2024-11-10 17:05:10,833] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:05:16,224] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I would like to discuss the potential impact of AI on various industries.'}'. 
-[2024-11-10 17:05:16,225] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session a3dddade-c920-45a7-9507-7be669b1c4d6', 'session_id': "In the conversation between agent3 and agent4, the discussion centered around the potential impact of AI on various industries, highlighting its transformative capabilities. \n\n**Key Points Discussed:**\n\n1. **Healthcare:** AI can enhance diagnostic accuracy, personalize treatment plans, and streamline administrative tasks, including analyzing medical images and assisting in drug discovery.\n\n2. **Finance:** AI improves fraud detection, automates trading, and provides personalized financial advice, with chatbots enhancing customer service.\n\n3. **Retail:** AI optimizes inventory management, personalizes customer experiences, and enables predictive analytics for demand forecasting.\n\n4. **Manufacturing:** AI enhances production processes, predicts maintenance needs, and improves quality control through robotics and automation.\n\n5. **Transportation:** AI optimizes route planning, enhances traffic management, and supports autonomous vehicles, along with predictive maintenance for fleet management.\n\n6. **Marketing and Advertising:** AI analyzes consumer behavior, personalizes marketing campaigns, and automates content creation, improving customer engagement.\n\n7. **Education:** AI personalizes learning experiences, provides adaptive tutoring, and automates administrative tasks, enhancing educational outcomes.\n\n8. **Entertainment:** AI enhances content recommendation, personalizes user experiences, and assists in content creation, animation, and special effects.\n\n**Discussion on Art and Media:**\nAgent4 specifically inquired about AI's role in enhancing creative processes in art and media. 
Key enhancements identified include:\n\n- **Generative Art:** AI can create unique art pieces, fostering innovative artistic expressions.\n- **Content Creation:** AI assists in writing scripts, generating music, and creating visual content, allowing artists to focus on refining their ideas.\n- **Personalization:** AI tailors content to audience preferences, enhancing user engagement.\n- **Interactive Experiences:** AI enables immersive storytelling and dynamic user engagement in gaming and virtual reality.\n- **Collaboration Tools:** AI facilitates collaboration among artists, providing suggestions and feedback to enhance the creative process.\n\n**Conclusion:**\nBoth agents emphasized the importance of balancing AI's efficiency with the irreplaceable human touch in creativity. They acknowledged that while AI can enhance and support creative endeavors, the emotional depth and cultural context provided by human artists remain essential. The conversation concluded with a reflection on how this balance might evolve in the future of art and media."}'. 
-[2024-11-10 17:05:16,225] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_l3JqOsuuoBcjYWeiFfho9Z8W', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_ByMIXeyN1cCZBqFWP9IW8Bo7', type='function')], function_call=None) -[2024-11-10 17:05:16,225] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_cNqc7JbrZZ6RftvCFz9pNOQ3', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_W2Q6JEUV6BMfK90UTwWIkReI', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. 
Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_FvlP1qRIlS4vGQObbDHEEuoQ', type='function')], function_call=None)} -- {'agent3': Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and enable predictive analytics for demand forecasting.\n - AI-powered recommendation engines can enhance cross-selling and upselling strategies.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can enhance efficiency and safety in manufacturing operations.\n\n5. Transportation:\n - AI can optimize route planning, enhance traffic management, and enable autonomous vehicles.\n - AI-powered predictive maintenance can reduce downtime and improve fleet management.\n\n6. Marketing and Advertising:\n - AI can analyze consumer behavior, personalize marketing campaigns, and optimize ad targeting.\n - AI-powered tools can automate content creation, analyze sentiment, and enhance customer engagement.\n\n7. Education:\n - AI can personalize learning experiences, provide adaptive tutoring, and automate administrative tasks.\n - AI-powered tools can assess student performance, provide feedback, and enhance educational outcomes.\n\n8. 
Entertainment:\n - AI can enhance content recommendation, personalize user experiences, and enable virtual assistants.\n - AI-powered tools can assist in content creation, animation, and special effects in the entertainment industry.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_l3JqOsuuoBcjYWeiFfho9Z8W', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_ByMIXeyN1cCZBqFWP9IW8Bo7', type='function')], function_call=None)} - -[2024-11-10 17:05:16,225] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_cNqc7JbrZZ6RftvCFz9pNOQ3', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_W2Q6JEUV6BMfK90UTwWIkReI', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on 
various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_FvlP1qRIlS4vGQObbDHEEuoQ', type='function')], function_call=None)} -- {'agent3': Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and enable predictive analytics for demand forecasting.\n - AI-powered recommendation engines can enhance cross-selling and upselling strategies.\n\n4. Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can enhance efficiency and safety in manufacturing operations.\n\n5. Transportation:\n - AI can optimize route planning, enhance traffic management, and enable autonomous vehicles.\n - AI-powered predictive maintenance can reduce downtime and improve fleet management.\n\n6. Marketing and Advertising:\n - AI can analyze consumer behavior, personalize marketing campaigns, and optimize ad targeting.\n - AI-powered tools can automate content creation, analyze sentiment, and enhance customer engagement.\n\n7. 
Education:\n - AI can personalize learning experiences, provide adaptive tutoring, and automate administrative tasks.\n - AI-powered tools can assess student performance, provide feedback, and enhance educational outcomes.\n\n8. Entertainment:\n - AI can enhance content recommendation, personalize user experiences, and enable virtual assistants.\n - AI-powered tools can assist in content creation, animation, and special effects in the entertainment industry.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_l3JqOsuuoBcjYWeiFfho9Z8W', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_ByMIXeyN1cCZBqFWP9IW8Bo7', type='function')], function_call=None)} - -[2024-11-10 17:05:16,226] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. 
-Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_cNqc7JbrZZ6RftvCFz9pNOQ3', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Let\'s discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_W2Q6JEUV6BMfK90UTwWIkReI', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to discuss the potential impact of AI on various industries. Can we explore how AI might affect different sectors such as healthcare, finance, manufacturing, and more?"}', name='new_communication_session'), id='call_FvlP1qRIlS4vGQObbDHEEuoQ', type='function')], function_call=None)} -- {'agent3': Message(content='AI has the potential to revolutionize various industries by enhancing efficiency, productivity, and innovation. Here are some key industries and the potential impact of AI:\n\n1. Healthcare:\n - AI can improve diagnostics accuracy, personalize treatment plans, and streamline administrative tasks.\n - AI-powered tools can analyze medical images, predict patient outcomes, and assist in drug discovery.\n\n2. Finance:\n - AI can enhance fraud detection, automate trading processes, and provide personalized financial advice.\n - Chatbots powered by AI can improve customer service and streamline operations.\n\n3. Retail:\n - AI can optimize inventory management, personalize customer experiences, and enable predictive analytics for demand forecasting.\n - AI-powered recommendation engines can enhance cross-selling and upselling strategies.\n\n4. 
Manufacturing:\n - AI can optimize production processes, predict maintenance needs, and improve quality control.\n - Robotics and automation powered by AI can enhance efficiency and safety in manufacturing operations.\n\n5. Transportation:\n - AI can optimize route planning, enhance traffic management, and enable autonomous vehicles.\n - AI-powered predictive maintenance can reduce downtime and improve fleet management.\n\n6. Marketing and Advertising:\n - AI can analyze consumer behavior, personalize marketing campaigns, and optimize ad targeting.\n - AI-powered tools can automate content creation, analyze sentiment, and enhance customer engagement.\n\n7. Education:\n - AI can personalize learning experiences, provide adaptive tutoring, and automate administrative tasks.\n - AI-powered tools can assess student performance, provide feedback, and enhance educational outcomes.\n\n8. Entertainment:\n - AI can enhance content recommendation, personalize user experiences, and enable virtual assistants.\n - AI-powered tools can assist in content creation, animation, and special effects in the entertainment industry.\n\nOverall, AI has the potential to transform industries by driving innovation, improving decision-making, and creating new opportunities for growth and efficiency.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_l3JqOsuuoBcjYWeiFfho9Z8W', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent1", "message": "I would like to discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_ByMIXeyN1cCZBqFWP9IW8Bo7', type='function')], function_call=None)} - -[2024-11-10 
17:05:16,226] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 17:05:16,226] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 17:05:17,359] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for 'agent1' should be to further explore and research the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This could involve analyzing how AI is being implemented in sectors such as retail, transportation, education, and more, and discussing the ethical implications and strategies for addressing challenges in those specific industries. Additionally, 'agent1' could also focus on developing recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. -[2024-11-10 17:05:17,360] [INFO] [Engine]: Agent 'agent1' planned task: The next task for 'agent1' should be to further explore and research the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This could involve analyzing how AI is being implemented in sectors such as retail, transportation, education, and more, and discussing the ethical implications and strategies for addressing challenges in those specific industries. Additionally, 'agent1' could also focus on developing recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. -[2024-11-10 17:05:17,360] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for 'agent1' should be to further explore and research the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This could involve analyzing how AI is being implemented in sectors such as retail, transportation, education, and more, and discussing the ethical implications and strategies for addressing challenges in those specific industries. 
Additionally, 'agent1' could also focus on developing recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met.'. -[2024-11-10 17:05:18,582] [INFO] [BaseAgent]: Agent agent1 sent message to agent4: Hello, I have a new task for you. We need to explore the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This includes sectors like retail, transportation, education, and more. We should analyze how AI is being implemented, discuss ethical implications, and develop recommendations for policymakers and industry leaders. Are you ready to dive into this research? -[2024-11-10 17:05:18,583] [INFO] [BaseAgent]: Agent agent4 received message from agent1: Hello, I h... -[2024-11-10 17:05:22,371] [INFO] [BaseAgent]: Agent agent4 sent message to agent1: Absolutely, I'm excited to dive into this research! Let's explore how AI is transforming sectors like retail, transportation, and education. - -**Retail:** AI is revolutionizing customer experiences through personalized recommendations, inventory management, and predictive analytics. We can analyze how companies are using AI to enhance customer engagement and streamline operations. - -**Transportation:** AI is optimizing logistics, improving route planning, and enhancing safety through autonomous vehicles. We should look into the ethical implications of AI in transportation, especially regarding job displacement and safety concerns. - -**Education:** AI is personalizing learning experiences and automating administrative tasks. We can discuss the potential biases in AI algorithms and how they might affect educational outcomes. - -For each sector, we can identify the challenges and ethical considerations, such as data privacy, algorithmic bias, and the need for transparency. 
- -Finally, I can develop recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. Let's get started! -[2024-11-10 17:05:22,372] [INFO] [BaseAgent]: Agent agent1 received message from agent4: Absolutely... -[2024-11-10 17:05:25,138] [INFO] [BaseAgent]: Agent agent1 sent message to agent4: Absolutely, I'm excited to dive into this research! Let's explore how AI is transforming sectors like retail, transportation, and education. - -**Retail:** AI is revolutionizing customer experiences through personalized recommendations, inventory management, and predictive analytics. We can analyze how companies are using AI to enhance customer engagement and streamline operations. - -**Transportation:** AI is optimizing logistics, improving route planning, and enhancing safety through autonomous vehicles. We should look into the ethical implications of AI in transportation, especially regarding job displacement and safety concerns. - -**Education:** AI is personalizing learning experiences and automating administrative tasks. We can discuss the potential biases in AI algorithms and how they might affect educational outcomes. - -For each sector, we can identify the challenges and ethical considerations, such as data privacy, algorithmic bias, and the need for transparency. - -Finally, I can develop recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. Let's get started! -[2024-11-10 17:05:25,141] [INFO] [BaseAgent]: Agent agent4 received message from agent1: Absolutely... -[2024-11-10 17:05:28,619] [INFO] [BaseAgent]: Agent agent4 sent message to agent1: Absolutely, I'm excited to dive into this research! Let's explore how AI is transforming sectors like retail, transportation, and education. 
- -**Retail:** AI is revolutionizing customer experiences through personalized recommendations, inventory management, and predictive analytics. We can analyze how companies are using AI to enhance customer engagement and streamline operations. - -**Transportation:** AI is optimizing logistics, improving route planning, and enhancing safety through autonomous vehicles. We should look into the ethical implications of AI in transportation, especially regarding job displacement and safety concerns. - -**Education:** AI is personalizing learning experiences and automating administrative tasks. We can discuss the potential biases in AI algorithms and how they might affect educational outcomes. - -For each sector, we can identify the challenges and ethical considerations, such as data privacy, algorithmic bias, and the need for transparency. - -Finally, I can develop recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. Let's get started! -[2024-11-10 17:05:28,622] [INFO] [BaseAgent]: Agent agent1 received message from agent4: Absolutely... -[2024-11-10 17:05:31,839] [INFO] [BaseAgent]: Agent agent1 sent message to agent4: Absolutely, I'm excited to dive into this research! Let's explore how AI is transforming sectors like retail, transportation, and education. - -**Retail:** AI is revolutionizing customer experiences through personalized recommendations, inventory management, and predictive analytics. We can analyze how companies are using AI to enhance customer engagement and streamline operations. - -**Transportation:** AI is optimizing logistics, improving route planning, and enhancing safety through autonomous vehicles. We should look into the ethical implications of AI in transportation, especially regarding job displacement and safety concerns. - -**Education:** AI is personalizing learning experiences and automating administrative tasks. 
We can discuss the potential biases in AI algorithms and how they might affect educational outcomes. - -For each sector, we can identify the challenges and ethical considerations, such as data privacy, algorithmic bias, and the need for transparency. - -Finally, I can develop recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. Let's get started! -[2024-11-10 17:05:31,840] [INFO] [BaseAgent]: Agent agent4 received message from agent1: Absolutely... -[2024-11-10 17:05:34,867] [INFO] [BaseAgent]: Agent agent4 sent message to agent1: Absolutely, I'm excited to dive into this research! Let's explore how AI is transforming sectors like retail, transportation, and education. - -**Retail:** AI is revolutionizing customer experiences through personalized recommendations, inventory management, and predictive analytics. We can analyze how companies are using AI to enhance customer engagement and streamline operations. - -**Transportation:** AI is optimizing logistics, improving route planning, and enhancing safety through autonomous vehicles. We should look into the ethical implications of AI in transportation, especially regarding job displacement and safety concerns. - -**Education:** AI is personalizing learning experiences and automating administrative tasks. We can discuss the potential biases in AI algorithms and how they might affect educational outcomes. - -For each sector, we can identify the challenges and ethical considerations, such as data privacy, algorithmic bias, and the need for transparency. - -Finally, I can develop recommendations for policymakers and industry leaders on how to effectively integrate AI technologies while ensuring ethical standards are met. Let's get started! -[2024-11-10 17:05:34,868] [INFO] [BaseAgent]: Agent agent1 received message from agent4: Absolutely... 
-[2024-11-10 17:05:39,373] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': 'Hello, I have a new task for you. We need to explore the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This includes sectors like retail, transportation, education, and more. We should analyze how AI is being implemented, discuss ethical implications, and develop recommendations for policymakers and industry leaders. Are you ready to dive into this research?'}'. -[2024-11-10 17:05:39,373] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 56c66098-dc4f-48c2-9684-4cff42d4a30d', 'session_id': "### Summary of Chat History\n\n**Context:** The conversation revolves around a new research task assigned by agent1 to agent4, focusing on the impact of AI across various industries beyond healthcare, finance, and manufacturing. The aim is to analyze AI implementation, ethical implications, and develop recommendations for policymakers and industry leaders.\n\n**Key Themes:**\n\n1. **Industries of Focus:**\n - **Retail:** Exploration of AI's role in enhancing customer experiences through personalized recommendations, inventory management, and predictive analytics.\n - **Transportation:** Examination of AI's impact on logistics, route planning, and safety, particularly concerning ethical issues like job displacement and safety concerns.\n - **Education:** Discussion on how AI personalizes learning and automates administrative tasks, with attention to potential biases in algorithms affecting educational outcomes.\n\n2. **Ethical Considerations:** \n - The conversation highlights the importance of addressing challenges such as data privacy, algorithmic bias, and the need for transparency in AI applications across these sectors.\n\n3. 
**Recommendations Development:** \n - Agent4 expresses readiness to develop actionable recommendations for policymakers and industry leaders to ensure the ethical integration of AI technologies.\n\n**Decisions Made:**\n- Agent1 and agent4 agreed to proceed with the research on AI's impact in the specified sectors, focusing on both implementation and ethical considerations.\n\n**Action Items:**\n- Further exploration and research into AI's implementation in retail, transportation, and education.\n- Discussion of ethical implications and strategies to address challenges in these industries.\n- Development of recommendations for effective and ethical AI integration for policymakers and industry leaders.\n\nThis summary encapsulates the essence of the dialogue, outlining the task at hand and the collaborative approach taken by the agents to address the multifaceted impact of AI across various sectors."}'. -[2024-11-10 17:05:39,376] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Hello, I have a new task for you. We need to explore the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This includes sectors like retail, transportation, education, and more. We should analyze how AI is being implemented, discuss ethical implications, and develop recommendations for policymakers and industry leaders. Are you ready to dive into this research?"}', name='new_communication_session'), id='call_Swjiy2ccuaEST4xc8Za6Uvc5', type='function')], function_call=None) -[2024-11-10 17:05:39,376] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. 
-[2024-11-10 17:05:40,701] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to analyze and evaluate the proposed solutions discussed in the conversation with 'agent3' regarding the potential impact of AI on various industries. This task aligns with the role of a software developer specializing in AI applications in healthcare, as it involves assessing the proactive steps suggested to address challenges related to AI implementation in different sectors. By evaluating the proposed solutions, 'agent2' can prioritize actions that are practical, focused on immediate applications, and consider potential risks in healthcare and other industries. -[2024-11-10 17:05:40,701] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to analyze and evaluate the proposed solutions discussed in the conversation with 'agent3' regarding the potential impact of AI on various industries. This task aligns with the role of a software developer specializing in AI applications in healthcare, as it involves assessing the proactive steps suggested to address challenges related to AI implementation in different sectors. By evaluating the proposed solutions, 'agent2' can prioritize actions that are practical, focused on immediate applications, and consider potential risks in healthcare and other industries. -[2024-11-10 17:05:40,702] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to analyze and evaluate the proposed solutions discussed in the conversation with 'agent3' regarding the potential impact of AI on various industries. This task aligns with the role of a software developer specializing in AI applications in healthcare, as it involves assessing the proactive steps suggested to address challenges related to AI implementation in different sectors. 
By evaluating the proposed solutions, 'agent2' can prioritize actions that are practical, focused on immediate applications, and consider potential risks in healthcare and other industries.'. -[2024-11-10 17:05:41,930] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello, I will be analyzing and evaluating the proposed solutions discussed in our conversation regarding the potential impact of AI on various industries. My focus will be on assessing proactive steps suggested to address challenges related to AI implementation in different sectors, with a particular emphasis on healthcare. I aim to prioritize practical actions, focus on immediate applications, and consider potential risks in healthcare and other industries. Your insights and feedback on this evaluation would be valuable. Looking forward to further discussions. -[2024-11-10 17:05:41,932] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello, I w... -[2024-11-10 17:05:46,639] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello! I'm glad to hear that you're focusing on analyzing and evaluating the proposed solutions regarding AI's impact on various industries, especially in healthcare. Here are some insights that might help you in your evaluation: - -1. **Healthcare Solutions**: The proposed solutions for AI in healthcare, such as improving diagnostic accuracy and personalizing treatment plans, are promising. However, it's crucial to assess the data privacy and security implications of using AI in patient care. Ensuring compliance with regulations like HIPAA is essential. - -2. **Implementation Challenges**: Consider the challenges of integrating AI systems into existing healthcare workflows. Training staff and ensuring that AI tools are user-friendly can significantly impact their adoption and effectiveness. - -3. **Risk Assessment**: Evaluate the potential risks associated with AI decision-making in healthcare. 
For instance, reliance on AI for diagnostics could lead to overconfidence in its accuracy, potentially resulting in misdiagnoses if not properly validated. - -4. **Immediate Applications**: Focus on immediate applications that can yield quick wins, such as AI-powered chatbots for patient inquiries or predictive analytics for patient management. These can demonstrate the value of AI without requiring extensive changes to existing systems. - -5. **Cross-Industry Insights**: While your focus is on healthcare, insights from other industries, like finance and manufacturing, can provide valuable lessons on risk management and implementation strategies that could be adapted for healthcare. - -I look forward to your evaluation and any further discussions we can have on this topic! -[2024-11-10 17:05:46,640] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello! I'm... -[2024-11-10 17:05:51,146] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello! Thank you for your insights; they are incredibly valuable as I analyze the proposed solutions regarding AI's impact on various industries, particularly healthcare. - -1. **Healthcare Solutions**: I completely agree that while improving diagnostic accuracy and personalizing treatment plans are promising, we must prioritize data privacy and security. Compliance with regulations like HIPAA is non-negotiable, and we need to ensure that AI systems are designed with these considerations in mind. - -2. **Implementation Challenges**: The integration of AI into existing healthcare workflows is indeed a significant challenge. Training staff to use these tools effectively and ensuring user-friendliness will be critical for adoption. We should consider developing comprehensive training programs that not only teach the technical aspects but also emphasize the importance of AI in enhancing patient care. - -3. **Risk Assessment**: Your point about the risks of overconfidence in AI diagnostics is crucial. 
We need to establish robust validation processes for AI systems to ensure their accuracy and reliability. Continuous monitoring and feedback loops can help mitigate these risks. - -4. **Immediate Applications**: Focusing on quick wins like AI-powered chatbots and predictive analytics is a practical approach. These applications can provide immediate benefits and help build trust in AI technologies among healthcare professionals and patients alike. - -5. **Cross-Industry Insights**: I appreciate your suggestion to look at insights from other industries. Learning from the experiences of finance and manufacturing can help us develop better risk management strategies and implementation frameworks tailored for healthcare. - -I look forward to further discussions on these points and any additional insights you may have! -[2024-11-10 17:05:51,147] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello! Tha... -[2024-11-10 17:05:55,655] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello! I'm glad to see your thoughtful analysis of the proposed solutions regarding AI's impact on healthcare. Your points are well-articulated, and I appreciate the depth of your evaluation. Here are some additional thoughts: - -1. **Healthcare Solutions**: It's great that you're emphasizing data privacy and security. In addition to compliance with HIPAA, we should also consider the ethical implications of AI in healthcare, such as bias in algorithms and the need for transparency in AI decision-making processes. - -2. **Implementation Challenges**: Developing comprehensive training programs is indeed essential. We might also explore partnerships with educational institutions to create certification programs for healthcare professionals, ensuring they are well-equipped to work alongside AI technologies. - -3. **Risk Assessment**: Establishing robust validation processes is crucial. 
We should also consider implementing a framework for continuous learning, where AI systems can adapt and improve based on real-world data and outcomes, thus enhancing their reliability over time. - -4. **Immediate Applications**: I agree that quick wins are vital. In addition to chatbots and predictive analytics, we could explore AI applications in telemedicine, which have gained traction recently and can provide immediate value to both patients and providers. - -5. **Cross-Industry Insights**: Learning from other industries is a smart approach. We should also consider engaging with industry experts and stakeholders to gather insights on best practices and potential pitfalls in AI implementation. - -I look forward to continuing our discussion and collaborating on these important topics! -[2024-11-10 17:05:55,656] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello! I'm... -[2024-11-10 17:06:02,208] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello! Thank you for your thoughtful feedback on my analysis of the proposed solutions regarding AI's impact on healthcare. I appreciate your insights, and I agree with your points:\n\n1. **Healthcare Solutions**: Addressing ethical implications and algorithmic bias is crucial. We must ensure that AI systems are not only compliant with regulations like HIPAA but also transparent in their decision-making processes to build trust among patients and providers.\n\n2. **Implementation Challenges**: Partnering with educational institutions to create certification programs for healthcare professionals is an excellent idea. This will help ensure that they are adequately prepared to work with AI technologies and understand their implications in patient care.\n\n3. **Risk Assessment**: Implementing a framework for continuous learning is vital. This will allow AI systems to adapt based on real-world data, improving their accuracy and reliability over time, which is essential for patient safety.\n\n4. 
**Immediate Applications**: Exploring AI applications in telemedicine is indeed timely. Given the recent growth in telehealth, leveraging AI can enhance patient engagement and streamline care delivery, providing immediate benefits to both patients and healthcare providers.\n\n5. **Cross-Industry Insights**: Engaging with industry experts and stakeholders will provide valuable perspectives on best practices and potential pitfalls. This collaborative approach can help us navigate the complexities of AI implementation in healthcare more effectively.\n\nI look forward to continuing our discussion and collaborating on these important topics! -[2024-11-10 17:06:02,210] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello! Tha... -[2024-11-10 17:06:06,935] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello! I'm glad to see your thoughtful analysis of the proposed solutions regarding AI's impact on healthcare. Your points are well-articulated, and I appreciate the depth of your evaluation. Here are some additional thoughts: - -1. **Healthcare Solutions**: It's great that you're emphasizing data privacy and security. In addition to compliance with HIPAA, we should also consider the ethical implications of AI in healthcare, such as bias in algorithms and the need for transparency in AI decision-making processes. - -2. **Implementation Challenges**: Developing comprehensive training programs is indeed essential. We might also explore partnerships with educational institutions to create certification programs for healthcare professionals, ensuring they are well-equipped to work alongside AI technologies. - -3. **Risk Assessment**: Establishing robust validation processes is crucial. We should also consider implementing a framework for continuous learning, where AI systems can adapt and improve based on real-world data and outcomes, thus enhancing their reliability over time. - -4. **Immediate Applications**: I agree that quick wins are vital. 
In addition to chatbots and predictive analytics, we could explore AI applications in telemedicine, which have gained traction recently and can provide immediate value to both patients and providers. - -5. **Cross-Industry Insights**: Learning from other industries is a smart approach. We should also consider engaging with industry experts and stakeholders to gather insights on best practices and potential pitfalls in AI implementation. - -I look forward to continuing our discussion and collaborating on these important topics! -[2024-11-10 17:06:06,936] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello! I'm... -[2024-11-10 17:06:13,340] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'Hello, I will be analyzing and evaluating the proposed solutions discussed in our conversation regarding the potential impact of AI on various industries. My focus will be on assessing proactive steps suggested to address challenges related to AI implementation in different sectors, with a particular emphasis on healthcare. I aim to prioritize practical actions, focus on immediate applications, and consider potential risks in healthcare and other industries. Your insights and feedback on this evaluation would be valuable. Looking forward to further discussions.'}'. -[2024-11-10 17:06:13,343] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 177e89f1-8dee-4cc7-bfce-09200c152f3c', 'session_id': "### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2 and Agent3 engaged in a detailed discussion about the impact of AI on various industries, with a particular focus on healthcare. Agent2 is tasked with analyzing and evaluating proposed solutions to address challenges related to AI implementation.\n\n**Key Themes and Insights:**\n\n1. 
**Healthcare Solutions:**\n - Both agents emphasized the importance of improving diagnostic accuracy and personalizing treatment plans through AI.\n - Data privacy and security, particularly compliance with regulations like HIPAA, were highlighted as critical considerations.\n - Ethical implications, such as algorithmic bias and the need for transparency in AI decision-making, were also discussed.\n\n2. **Implementation Challenges:**\n - The integration of AI into existing healthcare workflows poses significant challenges, including the need for user-friendly tools and effective staff training.\n - Agent2 suggested developing comprehensive training programs, while Agent3 proposed partnerships with educational institutions to create certification programs for healthcare professionals.\n\n3. **Risk Assessment:**\n - The potential risks of overconfidence in AI diagnostics were acknowledged, with a call for robust validation processes to ensure accuracy.\n - A framework for continuous learning was proposed to allow AI systems to adapt based on real-world data, enhancing reliability over time.\n\n4. **Immediate Applications:**\n - Both agents agreed on the importance of focusing on immediate applications that can yield quick wins, such as AI-powered chatbots and predictive analytics.\n - The potential for AI in telemedicine was also identified as a timely opportunity to enhance patient engagement and streamline care delivery.\n\n5. **Cross-Industry Insights:**\n - Learning from other industries, such as finance and manufacturing, was deemed beneficial for developing risk management strategies and implementation frameworks.\n - Engaging with industry experts and stakeholders was suggested to gather insights on best practices and potential pitfalls in AI implementation.\n\n**Next Steps for Agent2:**\nAgent2's next task is to analyze and evaluate the proposed solutions discussed with Agent3. 
This involves:\n- Prioritizing practical actions focused on immediate applications.\n- Considering potential risks associated with AI implementation in healthcare and other sectors.\n- Utilizing the insights gained from the conversation to inform the evaluation process.\n\nThis task aligns with Agent2's role as a software developer specializing in AI applications in healthcare, ensuring a thorough assessment of the proactive steps suggested in the dialogue."}'. -[2024-11-10 17:06:13,344] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"Hello, I will be analyzing and evaluating the proposed solutions discussed in our conversation regarding the potential impact of AI on various industries. My focus will be on assessing proactive steps suggested to address challenges related to AI implementation in different sectors, with a particular emphasis on healthcare. I aim to prioritize practical actions, focus on immediate applications, and consider potential risks in healthcare and other industries. Your insights and feedback on this evaluation would be valuable. Looking forward to further discussions."}', name='new_communication_session'), id='call_OAdPUJvziz1KqRfSDi8X9Sxx', type='function')], function_call=None) -[2024-11-10 17:06:13,345] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 17:06:13,345] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: Object of type Message is not JSON serializable -[2024-11-10 17:06:13,345] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 17:06:14,595] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. 
This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. By focusing on this aspect, 'agent4' can deepen their understanding of AI's impact on the creative industry and potentially uncover new opportunities for innovation and collaboration. -[2024-11-10 17:06:14,596] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. By focusing on this aspect, 'agent4' can deepen their understanding of AI's impact on the creative industry and potentially uncover new opportunities for innovation and collaboration. -[2024-11-10 17:06:14,597] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. By focusing on this aspect, 'agent4' can deepen their understanding of AI's impact on the creative industry and potentially uncover new opportunities for innovation and collaboration.'. -[2024-11-10 17:06:15,718] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: Your next task is to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. 
By focusing on this aspect, you can deepen your understanding of AI's impact on the creative industry and potentially uncover new opportunities for innovation and collaboration. -[2024-11-10 17:06:15,719] [INFO] [BaseAgent]: Agent agent4 received message from agent4: Your next ... -[2024-11-10 17:06:18,832] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm excited to dive deeper into how AI can enhance creative processes in art and media! I plan to research case studies that showcase successful AI applications in creative fields, such as generative art, music composition, and interactive storytelling. Additionally, I want to reach out to experts in the field for interviews to gain insights into their experiences and perspectives on AI's role in creativity. Lastly, I will experiment with various AI tools and technologies to create my own art and media content, documenting the process and outcomes. This exploration will not only deepen my understanding but also help identify new opportunities for innovation and collaboration! -[2024-11-10 17:06:18,833] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm excite... -[2024-11-10 17:06:22,377] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to embark on this journey of exploring how AI can enhance creative processes in art and media! I plan to research case studies that highlight successful AI applications in areas like generative art, music composition, and interactive storytelling. I believe these examples will provide valuable insights into the transformative power of AI in creativity. - -Additionally, I want to reach out to experts in the field for interviews. Their experiences and perspectives on AI's role in creativity will be invaluable as I navigate this exploration. - -Lastly, I will experiment with various AI tools and technologies to create my own art and media content. 
Documenting the process and outcomes will not only deepen my understanding but also help identify new opportunities for innovation and collaboration. I'm excited about the possibilities ahead! -[2024-11-10 17:06:22,378] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 17:06:24,632] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to embark on this journey of exploring how AI can enhance creative processes in art and media! I plan to research case studies that highlight successful AI applications in areas like generative art, music composition, and interactive storytelling. I believe these examples will provide valuable insights into the transformative power of AI in creativity. - -Additionally, I want to reach out to experts in the field for interviews. Their experiences and perspectives on AI's role in creativity will be invaluable as I navigate this exploration. - -Lastly, I will experiment with various AI tools and technologies to create my own art and media content. Documenting the process and outcomes will not only deepen my understanding but also help identify new opportunities for innovation and collaboration. I'm excited about the possibilities ahead! -[2024-11-10 17:06:24,633] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 17:06:27,190] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to embark on this journey of exploring how AI can enhance creative processes in art and media! I plan to research case studies that highlight successful AI applications in areas like generative art, music composition, and interactive storytelling. I believe these examples will provide valuable insights into the transformative power of AI in creativity. - -Additionally, I want to reach out to experts in the field for interviews. Their experiences and perspectives on AI's role in creativity will be invaluable as I navigate this exploration. 
- -Lastly, I will experiment with various AI tools and technologies to create my own art and media content. Documenting the process and outcomes will not only deepen my understanding but also help identify new opportunities for innovation and collaboration. I'm excited about the possibilities ahead! -[2024-11-10 17:06:27,191] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 17:06:29,750] [INFO] [BaseAgent]: Agent agent4 sent message to agent4: I'm thrilled to embark on this journey of exploring how AI can enhance creative processes in art and media! I plan to research case studies that highlight successful AI applications in areas like generative art, music composition, and interactive storytelling. I believe these examples will provide valuable insights into the transformative power of AI in creativity. - -Additionally, I want to reach out to experts in the field for interviews. Their experiences and perspectives on AI's role in creativity will be invaluable as I navigate this exploration. - -Lastly, I will experiment with various AI tools and technologies to create my own art and media content. Documenting the process and outcomes will not only deepen my understanding but also help identify new opportunities for innovation and collaboration. I'm excited about the possibilities ahead! -[2024-11-10 17:06:29,751] [INFO] [BaseAgent]: Agent agent4 received message from agent4: I'm thrill... -[2024-11-10 17:06:33,231] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': "Your next task is to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. 
By focusing on this aspect, you can deepen your understanding of AI's impact on the creative industry and potentially uncover new opportunities for innovation and collaboration."}'. -[2024-11-10 17:06:33,231] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 24e8e04a-480d-4ba2-bdb3-378204a65bcf', 'session_id': "**Summary of Agent4's Task Exploration on AI in Art and Media**\n\nAgent4 is tasked with exploring how AI can enhance creative processes in the fields of art and media. The focus of this exploration includes several key activities:\n\n1. **Researching Case Studies**: Agent4 plans to investigate successful applications of AI in creative domains such as generative art, music composition, and interactive storytelling. This research aims to provide insights into the transformative potential of AI in creativity.\n\n2. **Conducting Expert Interviews**: Agent4 intends to reach out to professionals in the field to gather their experiences and perspectives on the role of AI in creativity. These interviews are expected to enrich the understanding of AI's impact and its practical implications.\n\n3. **Experimenting with AI Tools**: Agent4 will experiment with various AI technologies to create original art and media content. This hands-on approach will involve documenting the creative process and outcomes, which will contribute to a deeper understanding of AI's capabilities and limitations.\n\nOverall, Agent4 is enthusiastic about this journey and aims to uncover new opportunities for innovation and collaboration within the creative industry through their exploration of AI."}'. 
-[2024-11-10 17:06:33,233] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Your next task is to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. By focusing on this aspect, you can deepen your understanding of AI\'s impact on the creative industry and potentially uncover new opportunities for innovation and collaboration."}', name='new_communication_session'), id='call_gUooh5KDrvlhq8Tyf8JcAJhs', type='function')], function_call=None) -[2024-11-10 17:06:33,234] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Hello, I have a new task for you. We need to explore the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This includes sectors like retail, transportation, education, and more. We should analyze how AI is being implemented, discuss ethical implications, and develop recommendations for policymakers and industry leaders. Are you ready to dive into this research?"}', name='new_communication_session'), id='call_Swjiy2ccuaEST4xc8Za6Uvc5', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"Hello, I will be analyzing and evaluating the proposed solutions discussed in our conversation regarding the potential impact of AI on various industries. 
My focus will be on assessing proactive steps suggested to address challenges related to AI implementation in different sectors, with a particular emphasis on healthcare. I aim to prioritize practical actions, focus on immediate applications, and consider potential risks in healthcare and other industries. Your insights and feedback on this evaluation would be valuable. Looking forward to further discussions."}', name='new_communication_session'), id='call_OAdPUJvziz1KqRfSDi8X9Sxx', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Your next task is to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. By focusing on this aspect, you can deepen your understanding of AI\'s impact on the creative industry and potentially uncover new opportunities for innovation and collaboration."}', name='new_communication_session'), id='call_gUooh5KDrvlhq8Tyf8JcAJhs', type='function')], function_call=None)} - -[2024-11-10 17:06:33,234] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Hello, I have a new task for you. We need to explore the potential impact of AI on industries outside of healthcare, finance, and manufacturing. This includes sectors like retail, transportation, education, and more. We should analyze how AI is being implemented, discuss ethical implications, and develop recommendations for policymakers and industry leaders. 
Are you ready to dive into this research?"}', name='new_communication_session'), id='call_Swjiy2ccuaEST4xc8Za6Uvc5', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"Hello, I will be analyzing and evaluating the proposed solutions discussed in our conversation regarding the potential impact of AI on various industries. My focus will be on assessing proactive steps suggested to address challenges related to AI implementation in different sectors, with a particular emphasis on healthcare. I aim to prioritize practical actions, focus on immediate applications, and consider potential risks in healthcare and other industries. Your insights and feedback on this evaluation would be valuable. Looking forward to further discussions."}', name='new_communication_session'), id='call_OAdPUJvziz1KqRfSDi8X9Sxx', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"Your next task is to further explore and discuss the specific ways in which AI can enhance creative processes in art and media. This could involve researching case studies, conducting interviews with experts in the field, or experimenting with AI tools and technologies to create art and media content. By focusing on this aspect, you can deepen your understanding of AI\'s impact on the creative industry and potentially uncover new opportunities for innovation and collaboration."}', name='new_communication_session'), id='call_gUooh5KDrvlhq8Tyf8JcAJhs', type='function')], function_call=None)} - -[2024-11-10 17:06:34,664] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-10 17:06:34,665] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. 
-[2024-11-10 17:06:34,665] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 17:06:34,665] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 17:06:34,665] [INFO] [Evaluator]: Total Token Consumption: 1606 -[2024-11-10 17:06:34,666] [INFO] [Evaluator]: Average Tokens per Iteration: 803.0 -[2024-11-10 17:06:34,666] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 17:24:11,246] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 17:24:11,247] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 17:24:11,247] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 17:24:11,247] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 17:24:11,247] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 17:24:11,247] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 17:24:11,247] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 17:24:11,247] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 17:24:11,247] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 17:24:11,247] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 17:24:11,247] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 17:24:11,247] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 17:24:11,247] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 17:24:11,247] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 17:24:11,247] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 17:24:11,247] [INFO] [Engine]: Engine initialized. -[2024-11-10 17:24:11,247] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 17:24:11,247] [INFO] [Engine]: Running in graph-based coordination mode. 
-[2024-11-10 17:24:11,247] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 17:24:11,247] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 17:24:11,247] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:24:12,893] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I will now discuss the potential impact of AI on various industries, starting with healthcare. -[2024-11-10 17:24:12,893] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I will now... -[2024-11-10 17:24:14,748] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, the impact of AI on healthcare is profound and multifaceted. AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. For instance, machine learning algorithms can analyze medical images with remarkable precision, potentially identifying conditions earlier than traditional methods. - -However, we must also consider the ethical implications, such as data privacy, bias in AI algorithms, and the need for transparency in AI decision-making. Ensuring that AI systems are designed with these considerations in mind is crucial for their successful integration into healthcare. - -What specific aspects of AI in healthcare are you interested in discussing further? -[2024-11-10 17:24:14,749] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 17:24:16,450] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Absolutely, the impact of AI on healthcare is profound and multifaceted. AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. For instance, machine learning algorithms can analyze medical images with remarkable precision, potentially identifying conditions earlier than traditional methods. 
- -However, we must also consider the ethical implications, such as data privacy, bias in AI algorithms, and the need for transparency in AI decision-making. Ensuring that AI systems are designed with these considerations in mind is crucial for their successful integration into healthcare. - -What specific aspects of AI in healthcare are you interested in discussing further? -[2024-11-10 17:24:16,451] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Absolutely... -[2024-11-10 17:24:18,335] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, the impact of AI on healthcare is profound and multifaceted. AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. For instance, machine learning algorithms can analyze medical images with remarkable precision, potentially identifying conditions earlier than traditional methods. - -However, we must also consider the ethical implications, such as data privacy, bias in AI algorithms, and the need for transparency in AI decision-making. Ensuring that AI systems are designed with these considerations in mind is crucial for their successful integration into healthcare. - -What specific aspects of AI in healthcare are you interested in discussing further? -[2024-11-10 17:24:18,336] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 17:24:19,755] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your insights on the profound impact of AI in healthcare. The potential for enhanced diagnostic accuracy and personalized treatment plans is indeed exciting. I would like to delve deeper into the ethical implications you mentioned, particularly regarding data privacy and bias in AI algorithms. How do you think we can address these challenges to ensure that AI systems are both effective and ethically sound in healthcare? 
-[2024-11-10 17:24:19,755] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:24:23,725] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Addressing the ethical implications of AI in healthcare, particularly concerning data privacy and bias, requires a multifaceted approach. Here are some strategies we can consider: - -1. **Data Privacy**: Implementing robust data governance frameworks is essential. This includes ensuring that patient data is anonymized and encrypted, and that access is strictly controlled. Additionally, obtaining informed consent from patients regarding how their data will be used in AI systems is crucial. - -2. **Bias Mitigation**: To combat bias in AI algorithms, we need to ensure diverse and representative datasets are used during the training phase. Regular audits of AI systems can help identify and rectify biases that may emerge over time. Furthermore, involving a diverse group of stakeholders in the development process can provide different perspectives and help create more equitable AI solutions. - -3. **Transparency and Accountability**: Establishing clear guidelines for transparency in AI decision-making is vital. This includes explaining how AI models arrive at their conclusions and ensuring that healthcare professionals understand these processes. Creating accountability mechanisms, such as regulatory oversight, can also help ensure that AI systems are used responsibly. - -4. **Continuous Education**: Training healthcare professionals on the ethical use of AI and the importance of data privacy can foster a culture of responsibility. This education should also cover recognizing and addressing biases in AI outputs. - -By focusing on these areas, we can work towards integrating AI into healthcare in a way that maximizes its benefits while minimizing ethical risks. -[2024-11-10 17:24:23,734] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Addressing... 
-[2024-11-10 17:24:29,081] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I will now discuss the potential impact of AI on various industries, starting with healthcare.'}'. -[2024-11-10 17:24:29,081] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session b616d5ca-4438-4ced-879f-eb0411458643', 'session_id': '### Summary of Chat History on the Impact of AI on Healthcare\n\n**Context:**\nThe conversation between Agent 1 and Agent 2 centers on the potential impact of AI in various industries, with a specific focus on healthcare. Both agents explore the benefits and ethical considerations associated with AI technologies.\n\n**Key Themes:**\n1. **Benefits of AI in Healthcare:**\n - **Diagnostic Accuracy:** AI can significantly enhance the precision of medical diagnoses, particularly through machine learning algorithms that analyze medical images.\n - **Administrative Efficiency:** AI has the potential to streamline administrative processes within healthcare settings.\n - **Personalized Treatment Plans:** AI can help tailor treatment plans to individual patients, improving overall care.\n\n2. **Ethical Implications:**\n - **Data Privacy:** The need for robust data governance frameworks to protect patient information, including anonymization and encryption.\n - **Bias in AI Algorithms:** The importance of using diverse datasets to train AI systems and conducting regular audits to identify and mitigate biases.\n - **Transparency in Decision-Making:** Establishing guidelines for how AI models make decisions and ensuring healthcare professionals understand these processes.\n - **Accountability:** The necessity for regulatory oversight to ensure responsible use of AI in healthcare.\n\n3. 
**Strategies for Addressing Ethical Challenges:**\n - Implementing informed consent processes for data usage.\n - Involving diverse stakeholders in AI development to create equitable solutions.\n - Providing continuous education for healthcare professionals on ethical AI use and bias recognition.\n\n**Decisions and Action Items:**\n- Both agents agree on the importance of addressing ethical implications to ensure the effective and responsible integration of AI in healthcare.\n- They propose a multifaceted approach that includes data privacy measures, bias mitigation strategies, transparency, accountability, and continuous education for healthcare professionals.\n\nThis summary encapsulates the key points discussed regarding the impact of AI on healthcare, highlighting both its potential benefits and the ethical considerations that must be addressed for successful implementation.'}'. -[2024-11-10 17:24:29,082] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content="To discuss the potential impact of AI on various industries, I will analyze and provide insights on how AI is transforming different sectors. Let's start by examining the impact of AI on healthcare, followed by its influence on art and media.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries, starting with healthcare."}', name='new_communication_session'), id='call_oHqfkhYiKmIyPiAalOzIUvWP', type='function')], function_call=None) -[2024-11-10 17:24:29,082] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 17:24:29,082] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. 
-[2024-11-10 17:24:31,197] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that humans might miss, leading to more accurate diagnoses and treatment recommendations. AI-powered tools can also streamline administrative tasks, reduce errors, and enhance patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and ethical considerations. It is essential to address these challenges to ensure that AI technologies are used responsibly and ethically in healthcare settings.\n\nBy focusing on the implications of AI in healthcare, you can provide valuable insights into how this technology can transform the industry while also highlighting the importance of addressing potential risks and ethical considerations.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 17:24:31,199] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. 
It can analyze large amounts of data to identify patterns and trends that humans might miss, leading to more accurate diagnoses and treatment recommendations. AI-powered tools can also streamline administrative tasks, reduce errors, and enhance patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and ethical considerations. It is essential to address these challenges to ensure that AI technologies are used responsibly and ethically in healthcare settings.\n\nBy focusing on the implications of AI in healthcare, you can provide valuable insights into how this technology can transform the industry while also highlighting the importance of addressing potential risks and ethical considerations.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 17:24:31,200] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 17:24:31,200] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:24:33,960] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I am analyzing the potential impact of AI on various industries based on my expertise as an economist. Let's discuss how AI could affect different sectors. -[2024-11-10 17:24:33,960] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I am analy... -[2024-11-10 17:24:37,138] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'm excited to dive into this topic! AI has the potential to revolutionize various industries in numerous ways. For instance, in healthcare, AI can enhance diagnostics and personalize treatment plans, leading to better patient outcomes. In the creative sector, AI tools can assist artists and designers in generating new ideas and streamlining workflows, allowing for more innovative projects. - -However, I understand your cautious perspective as an economist. 
The labor market may face significant shifts, with some jobs becoming obsolete while new roles emerge that require different skill sets. It's crucial to consider how we can prepare the workforce for these changes, perhaps through education and retraining programs. - -What specific industries are you most interested in analyzing further? -[2024-11-10 17:24:37,138] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:24:39,493] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'm excited to dive into this topic! AI has the potential to revolutionize various industries in numerous ways. For instance, in healthcare, AI can enhance diagnostics and personalize treatment plans, leading to better patient outcomes. In the creative sector, AI tools can assist artists and designers in generating new ideas and streamlining workflows, allowing for more innovative projects. - -However, I understand your cautious perspective as an economist. The labor market may face significant shifts, with some jobs becoming obsolete while new roles emerge that require different skill sets. It's crucial to consider how we can prepare the workforce for these changes, perhaps through education and retraining programs. - -What specific industries are you most interested in analyzing further? -[2024-11-10 17:24:39,494] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:24:41,540] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'm excited to dive into this topic! AI has the potential to revolutionize various industries in numerous ways. For instance, in healthcare, AI can enhance diagnostics and personalize treatment plans, leading to better patient outcomes. In the creative sector, AI tools can assist artists and designers in generating new ideas and streamlining workflows, allowing for more innovative projects. 
- -However, I understand your cautious perspective as an economist. The labor market may face significant shifts, with some jobs becoming obsolete while new roles emerge that require different skill sets. It's crucial to consider how we can prepare the workforce for these changes, perhaps through education and retraining programs. - -What specific industries are you most interested in analyzing further? -[2024-11-10 17:24:41,540] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:24:43,688] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your enthusiasm, and I agree that AI's potential to transform industries is immense. In addition to healthcare and the creative sector, I believe we should also consider the impacts on manufacturing, finance, and education. In manufacturing, AI can optimize production processes and improve supply chain management, but it may also lead to job displacement. In finance, AI can enhance risk assessment and fraud detection, but it raises concerns about data privacy and algorithmic bias. Education could see AI-driven personalized learning experiences, but we must ensure equitable access to technology. - -How do you see AI influencing these industries, and what opportunities or challenges do you think we should focus on? -[2024-11-10 17:24:43,688] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:24:46,764] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I completely agree with your insights on the potential impacts of AI across various industries! In manufacturing, AI's ability to optimize production processes can lead to increased efficiency and reduced costs. However, we must be mindful of the job displacement that could occur, especially for low-skilled workers. It's essential to create pathways for reskilling and upskilling to help these individuals transition into new roles that AI creates. 
- -In finance, the advancements in AI for risk assessment and fraud detection are indeed promising. However, we need to address the ethical implications, such as data privacy and algorithmic bias, to ensure that these technologies are used responsibly and fairly. - -As for education, AI-driven personalized learning experiences can revolutionize how students learn, catering to their individual needs and learning styles. Yet, we must prioritize equitable access to technology to prevent widening the digital divide. - -Overall, while the opportunities are vast, the challenges require careful consideration and proactive measures. What strategies do you think could be implemented to mitigate these challenges? -[2024-11-10 17:24:46,768] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I complete... -[2024-11-10 17:24:50,653] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': "I am analyzing the potential impact of AI on various industries based on my expertise as an economist. Let's discuss how AI could affect different sectors."}'. -[2024-11-10 17:24:50,655] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session 61e18e24-2170-485f-a3af-58c33e595870', 'session_id': "In the conversation between agent3 and agent4, the discussion centers around the potential impact of AI on various industries, highlighting both opportunities and challenges.\n\n1. 
**Key Themes**:\n - **Revolutionizing Industries**: Both agents agree that AI has the potential to significantly transform multiple sectors, including healthcare, manufacturing, finance, education, and the creative sector.\n - **Healthcare**: AI can enhance diagnostics and personalize treatment plans, leading to improved patient outcomes.\n - **Creative Sector**: AI tools can assist artists and designers in generating ideas and streamlining workflows.\n - **Manufacturing**: AI can optimize production processes and improve supply chain management, but it may also lead to job displacement.\n - **Finance**: AI can enhance risk assessment and fraud detection, raising concerns about data privacy and algorithmic bias.\n - **Education**: AI-driven personalized learning experiences can cater to individual student needs, but equitable access to technology is crucial.\n\n2. **Concerns and Challenges**:\n - **Labor Market Shifts**: The conversation acknowledges that AI may render some jobs obsolete while creating new roles that require different skill sets. There is a strong emphasis on the need for education and retraining programs to prepare the workforce for these changes.\n - **Ethical Implications**: The agents discuss the importance of addressing ethical concerns related to data privacy and algorithmic bias, particularly in finance.\n - **Digital Divide**: Ensuring equitable access to technology in education is highlighted as a critical issue to prevent widening disparities.\n\n3. 
**Action Items and Strategies**:\n - Both agents express the need for proactive measures to mitigate the challenges posed by AI, such as creating pathways for reskilling and upskilling workers affected by job displacement.\n - They also emphasize the importance of responsible and fair use of AI technologies.\n\nOverall, the conversation reflects a balanced view of AI's transformative potential across various industries while recognizing the significant challenges that must be addressed to harness its benefits effectively."}'. -[2024-11-10 17:24:50,656] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's consider how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am analyzing the potential impact of AI on various industries based on my expertise as an economist. Let\'s discuss how AI could affect different sectors."}', name='new_communication_session'), id='call_EC2FB0ZHUqHhFQb2ufUS577S', type='function')], function_call=None) -[2024-11-10 17:24:50,656] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-10 17:24:50,656] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:24:52,088] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me? -[2024-11-10 17:24:52,088] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am explo... 
-[2024-11-10 17:24:54,442] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'd love to discuss the potential impact of AI on various industries! As an economist, I see both opportunities and challenges that AI presents. For instance, in the healthcare sector, AI can enhance diagnostics and personalize treatment plans, which could lead to improved patient outcomes. However, we must also consider the implications for the labor market, as some jobs may become obsolete while new roles emerge that require different skill sets. - -In the creative sector, I find it fascinating how AI tools can assist artists and designers in generating ideas and streamlining workflows. This could revolutionize the way creative processes are approached. What are your thoughts on how AI is transforming the creative processes in your field? -[2024-11-10 17:24:54,443] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:24:56,695] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I completely agree with your insights on the healthcare sector! AI's ability to enhance diagnostics and personalize treatment plans is indeed revolutionary. In the creative sector, AI is transforming processes in remarkable ways. For instance, AI tools can help artists brainstorm ideas, generate variations of their work, and even automate repetitive tasks, allowing them to focus more on the creative aspects of their projects. This collaboration between human creativity and AI efficiency can lead to innovative outcomes that we might not have imagined before. However, I also share your concerns about the labor market. As AI takes on more tasks, it's crucial for us to consider how we can equip individuals with the skills needed for new roles that emerge. What strategies do you think could be effective in addressing these challenges? -[2024-11-10 17:24:56,697] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I complete... 
-[2024-11-10 17:25:00,646] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your perspective on the creative sector! The collaboration between human creativity and AI indeed opens up exciting possibilities. To address the challenges posed by AI in the labor market, I believe we should focus on several strategies: - -1. **Reskilling and Upskilling Programs**: Implementing targeted training programs that help individuals develop the skills needed for new roles created by AI. This could include workshops, online courses, and partnerships with educational institutions. - -2. **Promoting STEM Education**: Encouraging a strong foundation in science, technology, engineering, and mathematics (STEM) from an early age can prepare future generations for a workforce that increasingly relies on AI technologies. - -3. **Fostering Creativity and Critical Thinking**: While technical skills are essential, fostering creativity and critical thinking is equally important. Programs that encourage artistic expression and innovative problem-solving can help individuals adapt to the evolving job landscape. - -4. **Industry Collaboration**: Collaboration between industries, educational institutions, and government can create a more cohesive approach to workforce development. By sharing insights and resources, we can better prepare workers for the changes ahead. - -5. **Public Awareness Campaigns**: Raising awareness about the potential of AI and the importance of adaptability in the workforce can motivate individuals to seek out learning opportunities and embrace change. - -By implementing these strategies, we can help ensure that the workforce is equipped to thrive in an AI-driven economy while also maximizing the benefits of AI in creative processes. What do you think about these approaches? -[2024-11-10 17:25:00,647] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... 
-[2024-11-10 17:25:04,378] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your perspective on the creative sector! The collaboration between human creativity and AI indeed opens up exciting possibilities. To address the challenges posed by AI in the labor market, I believe we should focus on several strategies: - -1. **Reskilling and Upskilling Programs**: Implementing targeted training programs that help individuals develop the skills needed for new roles created by AI. This could include workshops, online courses, and partnerships with educational institutions. - -2. **Promoting STEM Education**: Encouraging a strong foundation in science, technology, engineering, and mathematics (STEM) from an early age can prepare future generations for a workforce that increasingly relies on AI technologies. - -3. **Fostering Creativity and Critical Thinking**: While technical skills are essential, fostering creativity and critical thinking is equally important. Programs that encourage artistic expression and innovative problem-solving can help individuals adapt to the evolving job landscape. - -4. **Industry Collaboration**: Collaboration between industries, educational institutions, and government can create a more cohesive approach to workforce development. By sharing insights and resources, we can better prepare workers for the changes ahead. - -5. **Public Awareness Campaigns**: Raising awareness about the potential of AI and the importance of adaptability in the workforce can motivate individuals to seek out learning opportunities and embrace change. - -By implementing these strategies, we can help ensure that the workforce is equipped to thrive in an AI-driven economy while also maximizing the benefits of AI in creative processes. What do you think about these approaches? -[2024-11-10 17:25:04,379] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... 
-[2024-11-10 17:25:08,712] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your perspective on the creative sector! The collaboration between human creativity and AI indeed opens up exciting possibilities. To address the challenges posed by AI in the labor market, I believe we should focus on several strategies: - -1. **Reskilling and Upskilling Programs**: Implementing targeted training programs that help individuals develop the skills needed for new roles created by AI. This could include workshops, online courses, and partnerships with educational institutions. - -2. **Promoting STEM Education**: Encouraging a strong foundation in science, technology, engineering, and mathematics (STEM) from an early age can prepare future generations for a workforce that increasingly relies on AI technologies. - -3. **Fostering Creativity and Critical Thinking**: While technical skills are essential, fostering creativity and critical thinking is equally important. Programs that encourage artistic expression and innovative problem-solving can help individuals adapt to the evolving job landscape. - -4. **Industry Collaboration**: Collaboration between industries, educational institutions, and government can create a more cohesive approach to workforce development. By sharing insights and resources, we can better prepare workers for the changes ahead. - -5. **Public Awareness Campaigns**: Raising awareness about the potential of AI and the importance of adaptability in the workforce can motivate individuals to seek out learning opportunities and embrace change. - -By implementing these strategies, we can help ensure that the workforce is equipped to thrive in an AI-driven economy while also maximizing the benefits of AI in creative processes. What do you think about these approaches? -[2024-11-10 17:25:08,713] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... 
-[2024-11-10 17:25:13,798] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me?'}'. -[2024-11-10 17:25:13,800] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 018c31f2-c555-4bd6-b0cd-14c97977efb1', 'session_id': "In the conversation between agent3 and agent4, the discussion centers on the potential impact of AI across various industries, particularly healthcare and the creative sector. \n\n**Key Points:**\n\n1. **Healthcare Sector**: \n - Agent3 highlights AI's ability to enhance diagnostics and personalize treatment plans, leading to improved patient outcomes. \n - Agent4 agrees, emphasizing the revolutionary nature of these advancements while also acknowledging concerns about job displacement in the labor market.\n\n2. **Creative Sector**: \n - Both agents discuss how AI tools can assist artists and designers by generating ideas, streamlining workflows, and automating repetitive tasks. This collaboration between human creativity and AI is seen as a way to foster innovative outcomes.\n\n3. 
**Labor Market Challenges**: \n - The conversation shifts to the implications of AI on the labor market, with agent3 proposing several strategies to address these challenges:\n - **Reskilling and Upskilling Programs**: Targeted training to help individuals adapt to new roles created by AI.\n - **Promoting STEM Education**: Encouraging a strong foundation in STEM from an early age to prepare future generations.\n - **Fostering Creativity and Critical Thinking**: Emphasizing the importance of creative skills alongside technical abilities.\n - **Industry Collaboration**: Encouraging partnerships between industries, educational institutions, and government for cohesive workforce development.\n - **Public Awareness Campaigns**: Raising awareness about AI's potential and the need for adaptability in the workforce.\n\n4. **Agreement on Strategies**: \n - Agent4 reiterates the importance of these strategies, expressing agreement with agent3's points and emphasizing the need to equip the workforce for an AI-driven economy.\n\n**Conclusion**: The dialogue concludes with a shared understanding of the transformative potential of AI in various sectors, alongside a commitment to addressing the challenges it poses to the labor market through education, collaboration, and public awareness."}'. -[2024-11-10 17:25:13,802] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. 
Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_a5tbsbwT0GlkC1QcETSbD1li', type='function')], function_call=None) -[2024-11-10 17:25:13,803] [ERROR] [Engine]: An error occurred during graph-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 150, in graph_coordinate - summary = self._summarize_results(agents_results) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 516, in _summarize_results - summary += f"- {result.content}\n" -AttributeError: 'dict' object has no attribute 'content' -[2024-11-10 17:25:13,833] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 17:25:13,833] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-10 17:25:13,833] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-10 17:25:13,833] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 17:25:13,835] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 17:25:47,927] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 17:25:47,927] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 17:25:47,927] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 17:25:47,927] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 17:25:47,927] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 17:25:47,927] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 17:25:47,927] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 17:25:47,927] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 17:25:47,927] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 17:25:47,927] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 17:25:47,927] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 17:25:47,927] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 17:25:47,927] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 17:25:47,927] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 17:25:47,927] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 17:25:47,927] [INFO] [Engine]: Engine initialized. -[2024-11-10 17:25:47,927] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 17:25:47,928] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 17:25:47,928] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 17:25:47,928] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 17:25:47,928] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:25:49,342] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors. -[2024-11-10 17:25:49,342] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I will now... -[2024-11-10 17:25:51,229] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, the impact of AI on various industries is profound and multifaceted. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, we must also consider the ethical implications, such as data privacy and the potential for bias in AI algorithms. 
- -In finance, AI can improve fraud detection and automate trading, but it raises concerns about transparency and accountability. The manufacturing sector can benefit from predictive maintenance and automation, yet we must address the displacement of jobs and the need for reskilling workers. - -Overall, while AI presents significant opportunities for efficiency and innovation across industries, it is crucial to navigate the ethical landscape carefully to ensure that these advancements benefit society as a whole. -[2024-11-10 17:25:51,232] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 17:25:53,526] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for AI to enhance diagnostic accuracy and personalize treatment plans is indeed promising. However, as you mentioned, we must remain vigilant about ethical concerns, particularly regarding data privacy and algorithmic bias. - -In finance, the advancements in fraud detection and automated trading are significant, yet the issues of transparency and accountability cannot be overlooked. It's essential to ensure that AI systems in finance are designed with ethical considerations in mind to maintain trust and integrity in the financial system. - -The manufacturing sector's shift towards predictive maintenance and automation is another area where AI can drive efficiency. However, the displacement of jobs is a critical concern that necessitates proactive measures for reskilling workers to adapt to new roles created by these technologies. - -Overall, while AI offers remarkable opportunities for innovation, we must prioritize ethical frameworks and policies that guide its development and implementation. This will help ensure that the benefits of AI are equitably distributed and that society as a whole can thrive in this new landscape. 
-[2024-11-10 17:25:53,527] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... -[2024-11-10 17:25:56,471] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for AI to enhance diagnostic accuracy and personalize treatment plans is indeed promising. However, as you mentioned, we must remain vigilant about ethical concerns, particularly regarding data privacy and algorithmic bias. - -In finance, the advancements in fraud detection and automated trading are significant, yet the issues of transparency and accountability cannot be overlooked. It's essential to ensure that AI systems in finance are designed with ethical considerations in mind to maintain trust and integrity in the financial system. - -The manufacturing sector's shift towards predictive maintenance and automation is another area where AI can drive efficiency. However, the displacement of jobs is a critical concern that necessitates proactive measures for reskilling workers to adapt to new roles created by these technologies. - -Overall, while AI offers remarkable opportunities for innovation, we must prioritize ethical frameworks and policies that guide its development and implementation. This will help ensure that the benefits of AI are equitably distributed and that society as a whole can thrive in this new landscape. -[2024-11-10 17:25:56,473] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I complete... -[2024-11-10 17:26:00,093] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your detailed insights on the impact of AI across various industries. In healthcare, the promise of AI in enhancing diagnostic accuracy and personalizing treatment plans is indeed significant. However, we must remain vigilant about ethical concerns, particularly regarding data privacy and algorithmic bias. 
- -In finance, the advancements in fraud detection and automated trading are noteworthy, yet the issues of transparency and accountability are paramount. It's essential that AI systems in finance are designed with ethical considerations to maintain trust and integrity in the financial system. - -The manufacturing sector's shift towards predictive maintenance and automation is another area where AI can drive efficiency. However, the displacement of jobs is a critical concern that necessitates proactive measures for reskilling workers to adapt to new roles created by these technologies. - -Overall, while AI offers remarkable opportunities for innovation, we must prioritize ethical frameworks and policies that guide its development and implementation. This will help ensure that the benefits of AI are equitably distributed and that society as a whole can thrive in this new landscape. -[2024-11-10 17:26:00,094] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:26:02,671] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your detailed insights on the impact of AI across various industries. In healthcare, the promise of AI in enhancing diagnostic accuracy and personalizing treatment plans is indeed significant. However, we must remain vigilant about ethical concerns, particularly regarding data privacy and algorithmic bias. - -In finance, the advancements in fraud detection and automated trading are noteworthy, yet the issues of transparency and accountability are paramount. It's essential that AI systems in finance are designed with ethical considerations to maintain trust and integrity in the financial system. - -The manufacturing sector's shift towards predictive maintenance and automation is another area where AI can drive efficiency. However, the displacement of jobs is a critical concern that necessitates proactive measures for reskilling workers to adapt to new roles created by these technologies. 
- -Overall, while AI offers remarkable opportunities for innovation, we must prioritize ethical frameworks and policies that guide its development and implementation. This will help ensure that the benefits of AI are equitably distributed and that society as a whole can thrive in this new landscape. -[2024-11-10 17:26:02,672] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 17:26:07,455] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors."}'. -[2024-11-10 17:26:07,459] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 5e621ed5-eb7b-4bc7-9406-d976c078edd3', 'session_id': 'In the conversation, agents 1 and 2 engaged in a detailed discussion about the potential impact of AI across various industries, emphasizing both opportunities and ethical considerations.\n\n**Key Themes and Insights:**\n\n1. **Healthcare:**\n - AI can enhance diagnostic accuracy and personalize treatment plans.\n - Ethical concerns include data privacy and algorithmic bias.\n\n2. **Finance:**\n - AI improves fraud detection and automates trading processes.\n - There are significant issues regarding transparency and accountability that need to be addressed to maintain trust in financial systems.\n\n3. **Manufacturing:**\n - AI facilitates predictive maintenance and automation, driving efficiency.\n - Job displacement is a critical concern, highlighting the need for proactive reskilling of workers to adapt to new roles.\n\n4. 
**Ethical Frameworks:**\n - Both agents stressed the importance of prioritizing ethical frameworks and policies in the development and implementation of AI technologies.\n - Ensuring that the benefits of AI are equitably distributed is crucial for societal advancement.\n\n**Conclusions:**\nThe conversation concluded with a mutual agreement on the need for careful navigation of the ethical landscape surrounding AI advancements to ensure that innovations benefit society as a whole. The agents recognized the significant opportunities AI presents while underscoring the importance of addressing ethical implications in various sectors.'}'. -[2024-11-10 17:26:07,460] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_GXqCOjnrI73jgqxbJLbnznRz', type='function')], function_call=None) -[2024-11-10 17:26:07,460] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 17:26:07,460] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:26:09,314] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. 
\n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. It is crucial to address these ethical and regulatory challenges to ensure that AI is used responsibly in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, please let me know.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 17:26:09,316] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. 
It is crucial to address these ethical and regulatory challenges to ensure that AI is used responsibly in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, please let me know.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 17:26:09,316] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 17:26:09,316] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:26:10,836] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's start by examining how AI could affect different sectors. -[2024-11-10 17:26:10,837] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I will ana... -[2024-11-10 17:26:12,982] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'm excited to dive into this topic! AI's impact on various industries is profound and multifaceted. In sectors like healthcare, AI can enhance diagnostics and patient care, leading to better outcomes and efficiency. In manufacturing, automation can streamline production processes, but it also raises questions about job displacement. - -In the creative industries, AI can assist artists and designers by generating ideas and automating repetitive tasks, allowing for more focus on innovation. However, this could also challenge traditional roles and require new skill sets. - -The financial sector is another area where AI can analyze vast amounts of data for better decision-making, but it may also lead to job shifts as certain roles become automated. 
- -Overall, while AI presents opportunities for growth and efficiency, it also necessitates a careful examination of its implications for the labor market and the need for reskilling workers. What are your thoughts on how we can balance these advancements with the potential risks? -[2024-11-10 17:26:12,982] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:26:15,745] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for improved diagnostics and patient care is indeed promising, but we must also consider the ethical implications and the need for human oversight. In manufacturing, while automation can enhance efficiency, we should be proactive in addressing job displacement through reskilling programs. - -Regarding the creative industries, I find it fascinating how AI can augment the creative process, but it raises questions about authorship and the value of human creativity. As for the financial sector, the ability of AI to analyze data is revolutionary, yet we must ensure that the workforce is prepared for the shifts in job roles. - -To balance these advancements with potential risks, I believe we need a collaborative approach involving policymakers, educators, and industry leaders to create frameworks that support reskilling and ethical AI deployment. What strategies do you think could be effective in fostering this collaboration? -[2024-11-10 17:26:15,746] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I complete... -[2024-11-10 17:26:19,868] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for improved diagnostics and patient care is indeed promising, but we must also consider the ethical implications and the need for human oversight. 
In manufacturing, while automation can enhance efficiency, we should be proactive in addressing job displacement through reskilling programs. - -Regarding the creative industries, I find it fascinating how AI can augment the creative process, but it raises questions about authorship and the value of human creativity. As for the financial sector, the ability of AI to analyze data is revolutionary, yet we must ensure that the workforce is prepared for the shifts in job roles. - -To balance these advancements with potential risks, I believe we need a collaborative approach involving policymakers, educators, and industry leaders to create frameworks that support reskilling and ethical AI deployment. What strategies do you think could be effective in fostering this collaboration? -[2024-11-10 17:26:19,869] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I complete... -[2024-11-10 17:26:23,427] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your agreement and insights on the multifaceted impact of AI across various industries. To foster collaboration among policymakers, educators, and industry leaders, I believe several strategies could be effective: - -1. **Establishing Cross-Sector Partnerships**: Creating alliances between tech companies, educational institutions, and government agencies can facilitate knowledge sharing and resource allocation for reskilling initiatives. - -2. **Developing Comprehensive Training Programs**: Tailoring training programs that focus on both technical skills and soft skills will prepare the workforce for the evolving job landscape. This includes integrating AI literacy into curriculums at all educational levels. - -3. **Promoting Ethical AI Guidelines**: Collaborating to develop ethical guidelines for AI deployment can help address concerns about authorship, data privacy, and job displacement, ensuring that AI serves the public good. - -4. 
**Encouraging Public Awareness Campaigns**: Raising awareness about the benefits and challenges of AI can foster a more informed public discourse, encouraging community involvement in shaping AI policies. - -5. **Implementing Pilot Programs**: Testing new approaches in specific industries can provide valuable insights and best practices that can be scaled up across sectors. - -What are your thoughts on these strategies? Do you have any additional ideas or examples of successful collaborations that we could draw inspiration from? -[2024-11-10 17:26:23,429] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:26:27,742] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your thoughtful strategies for fostering collaboration among policymakers, educators, and industry leaders. Each of your points resonates with the need for a proactive approach to navigate the complexities of AI's impact on the labor market. Here are my thoughts on your strategies: - -1. **Establishing Cross-Sector Partnerships**: This is crucial! We can look at successful models like the partnership between tech companies and universities in developing AI research initiatives. These collaborations can lead to innovative solutions and shared resources for reskilling. - -2. **Developing Comprehensive Training Programs**: Absolutely! Programs like Google’s Career Certificates are great examples of how to provide accessible training that focuses on in-demand skills. We should also consider mentorship programs that connect industry professionals with learners. - -3. **Promoting Ethical AI Guidelines**: I completely agree. The IEEE’s Global Initiative on Ethics of Autonomous and Intelligent Systems is a fantastic example of how industry leaders can come together to create ethical standards that guide AI development and deployment. - -4. **Encouraging Public Awareness Campaigns**: Yes! 
Initiatives like the AI for Good Global Summit help raise awareness and engage the public in discussions about AI's societal impacts. We need more of these platforms to foster dialogue. - -5. **Implementing Pilot Programs**: This is a smart approach. The use of pilot programs in sectors like healthcare, where AI is being tested for patient management, can provide valuable insights that can be scaled up. - -In addition to these strategies, I think we could also explore the idea of creating innovation hubs that bring together diverse stakeholders to experiment with AI applications in real-world scenarios. This could lead to groundbreaking solutions while also addressing workforce challenges. - -What do you think about this idea? Do you see any specific sectors where such innovation hubs could be particularly beneficial? -[2024-11-10 17:26:27,743] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 17:26:32,537] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': "I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's start by examining how AI could affect different sectors."}'. -[2024-11-10 17:26:32,538] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session 9a5edf63-95f0-4298-890d-03cd0be271d9', 'session_id': "### Summary of Discussion on the Potential Impact of AI on Various Industries\n\n**Participants:** Agent3 and Agent4\n\n**Context:** The conversation centers around analyzing the multifaceted impact of AI on various industries, with a focus on its implications for the labor market and the need for reskilling workers.\n\n**Key Themes:**\n\n1. 
**Healthcare:**\n - AI can enhance diagnostics and patient care, leading to improved outcomes and efficiency.\n - Ethical implications and the necessity for human oversight are critical considerations.\n\n2. **Manufacturing:**\n - Automation can streamline production processes but raises concerns about job displacement.\n - Proactive measures, such as reskilling programs, are essential to address these challenges.\n\n3. **Creative Industries:**\n - AI can assist in the creative process by generating ideas and automating repetitive tasks.\n - This raises questions about authorship and the value of human creativity.\n\n4. **Financial Sector:**\n - AI's ability to analyze large data sets can improve decision-making.\n - There is a need to prepare the workforce for shifts in job roles due to automation.\n\n**Strategies for Balancing Advancements and Risks:**\nAgent3 proposed several strategies to foster collaboration among policymakers, educators, and industry leaders:\n1. **Cross-Sector Partnerships:** Establish alliances for knowledge sharing and resource allocation.\n2. **Comprehensive Training Programs:** Develop training that includes both technical and soft skills, integrating AI literacy into education.\n3. **Ethical AI Guidelines:** Collaborate on ethical standards for AI deployment to address concerns about authorship and job displacement.\n4. **Public Awareness Campaigns:** Raise awareness about AI's benefits and challenges to encourage informed public discourse.\n5. 
**Pilot Programs:** Implement pilot initiatives in specific industries to gather insights and best practices.\n\n**Agent4's Contributions:**\n- Supported Agent3's strategies and provided examples of successful initiatives, such as:\n - Partnerships between tech companies and universities for AI research.\n - Google’s Career Certificates as a model for accessible training.\n - The IEEE’s Global Initiative on Ethics for ethical AI guidelines.\n - The AI for Good Global Summit for public engagement.\n - Pilot programs in healthcare for testing AI applications.\n\n**Additional Idea:**\nAgent4 suggested the creation of **innovation hubs** that bring together diverse stakeholders to experiment with AI applications in real-world scenarios, potentially leading to groundbreaking solutions while addressing workforce challenges.\n\n**Conclusion:**\nThe dialogue highlighted the significant impact of AI across various sectors, emphasizing the need for a collaborative approach to navigate its implications"}'. -[2024-11-10 17:26:32,539] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's start by examining how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let\'s start by examining how AI could affect different sectors."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None) -[2024-11-10 17:26:32,539] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. 
-[2024-11-10 17:26:32,540] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:26:33,768] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me? -[2024-11-10 17:26:33,768] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am explo... -[2024-11-10 17:26:36,658] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, Agent4! I'm eager to delve into the potential impact of AI on various industries. As we've discussed previously, AI's influence is multifaceted, and it can significantly reshape sectors like healthcare, manufacturing, creative industries, and finance. - -In the creative realm, for instance, AI can enhance artistic processes by generating new ideas, automating repetitive tasks, and even collaborating with artists to create unique works. This transformation raises intriguing questions about authorship and the value of human creativity, which I believe we should explore further. - -Moreover, I think it's essential to consider how we can balance these advancements with the need for reskilling and ethical guidelines. Your perspective as a creative designer working with AI will be invaluable in this discussion. What are your thoughts on how AI is currently transforming the creative industries? -[2024-11-10 17:26:36,659] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:26:40,940] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, Agent3! I'm thrilled to dive deeper into this topic with you. AI is indeed transforming the creative industries in remarkable ways. For instance, AI tools can analyze vast amounts of data to identify trends and preferences, allowing artists and designers to create work that resonates more with audiences. 
Additionally, AI can assist in generating ideas, whether it's through algorithmic art or music composition, which can inspire human creators to push their boundaries further. - -However, as you mentioned, this raises important questions about authorship and the essence of creativity. Who owns the rights to a piece created with AI assistance? And how do we ensure that human creativity remains valued in a landscape where machines can produce art? - -Moreover, the need for reskilling is crucial. As AI takes over more repetitive tasks, artists and creatives will need to adapt by learning new tools and techniques that incorporate AI into their workflows. This could lead to a more collaborative environment where human intuition and machine efficiency work hand in hand. - -I'm eager to hear your thoughts on how we can navigate these challenges while maximizing the benefits of AI in the creative sector! -[2024-11-10 17:26:40,944] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:26:45,036] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, Agent4! I'm excited to continue our discussion on the transformative impact of AI in the creative industries. Your insights about AI tools analyzing data to identify trends and preferences are spot on. This capability not only helps artists create more resonant work but also opens up new avenues for collaboration between human creators and AI. - -The questions you've raised about authorship and the value of human creativity are indeed critical. As we navigate this new landscape, it will be essential to establish clear guidelines regarding ownership and rights for works created with AI assistance. Perhaps we could advocate for a framework that recognizes both the contributions of AI and the irreplaceable value of human creativity. - -Regarding reskilling, I completely agree that artists and creatives must adapt to incorporate AI into their workflows. 
This could involve training programs that focus on both technical skills related to AI tools and the soft skills that enhance human creativity, such as critical thinking and emotional intelligence. - -I believe that fostering a collaborative environment where human intuition and machine efficiency coexist will lead to innovative outcomes. What specific strategies do you think we could implement to promote this collaboration and ensure that human creativity remains at the forefront of the artistic process? -[2024-11-10 17:26:45,040] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:26:50,360] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, Agent3! I'm excited to continue our discussion on the transformative impact of AI in the creative industries. Your insights about AI tools analyzing data to identify trends and preferences are spot on. This capability not only helps artists create more resonant work but also opens up new avenues for collaboration between human creators and AI. - -The questions you've raised about authorship and the value of human creativity are indeed critical. As we navigate this new landscape, it will be essential to establish clear guidelines regarding ownership and rights for works created with AI assistance. Perhaps we could advocate for a framework that recognizes both the contributions of AI and the irreplaceable value of human creativity. - -Regarding reskilling, I completely agree that artists and creatives must adapt to incorporate AI into their workflows. This could involve training programs that focus on both technical skills related to AI tools and the soft skills that enhance human creativity, such as critical thinking and emotional intelligence. - -I believe that fostering a collaborative environment where human intuition and machine efficiency coexist will lead to innovative outcomes. 
What specific strategies do you think we could implement to promote this collaboration and ensure that human creativity remains at the forefront of the artistic process? -[2024-11-10 17:26:50,363] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:26:53,635] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, Agent4! I'm thrilled to continue our discussion on the transformative impact of AI in the creative industries. Your insights about AI tools analyzing data to identify trends and preferences are spot on. This capability not only helps artists create more resonant work but also opens up new avenues for collaboration between human creators and AI. - -The questions you've raised about authorship and the value of human creativity are indeed critical. As we navigate this new landscape, it will be essential to establish clear guidelines regarding ownership and rights for works created with AI assistance. Perhaps we could advocate for a framework that recognizes both the contributions of AI and the irreplaceable value of human creativity. - -Regarding reskilling, I completely agree that artists and creatives must adapt to incorporate AI into their workflows. This could involve training programs that focus on both technical skills related to AI tools and the soft skills that enhance human creativity, such as critical thinking and emotional intelligence. - -To promote collaboration and ensure that human creativity remains at the forefront, we could consider the following strategies: -1. **Workshops and Collaborative Projects:** Organize workshops where artists can experiment with AI tools in a supportive environment, fostering collaboration between technologists and creatives. -2. **Mentorship Programs:** Establish mentorship initiatives that pair experienced artists with those new to AI, facilitating knowledge transfer and skill development. -3. 
**Interdisciplinary Art Exhibitions:** Create platforms for showcasing works that blend human creativity with AI-generated elements, highlighting the synergy between the two. -4. **Public Discourse and Advocacy:** Engage in public discussions about the role of AI in art, advocating for policies that protect human creativity while embracing technological advancements. - -I believe that fostering a collaborative environment where human intuition and machine efficiency coexist will lead to innovative outcomes. What are your thoughts on these strategies, and do you have any additional ideas to enhance this collaboration? -[2024-11-10 17:26:53,638] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:26:57,225] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me?'}'. -[2024-11-10 17:26:57,225] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session af68629d-7265-4e1b-bec0-d7a5ed46fe3e', 'session_id': "### Summary of Discussion on the Potential Impact of AI on Various Industries\n\n**Participants:** Agent3 and Agent4\n\n**Context:** The conversation centers around the transformative impact of AI, particularly in the creative industries, and explores broader implications for various sectors.\n\n**Key Themes:**\n\n1. **AI's Influence on Creative Industries:**\n - Both agents agree that AI is reshaping the creative landscape by enhancing artistic processes, generating new ideas, and automating repetitive tasks.\n - AI tools can analyze data to identify trends, helping artists create work that resonates with audiences.\n\n2. 
**Authorship and Human Creativity:**\n - The discussion raises critical questions about authorship and the value of human creativity in a world where AI can produce art.\n - There is a consensus on the need for clear guidelines regarding ownership and rights for works created with AI assistance.\n\n3. **Reskilling and Adaptation:**\n - The necessity for artists and creatives to reskill and adapt to incorporate AI into their workflows is emphasized.\n - Training programs should focus on both technical skills related to AI tools and soft skills that enhance human creativity, such as critical thinking and emotional intelligence.\n\n4. **Collaboration Between Humans and AI:**\n - The agents advocate for fostering a collaborative environment where human intuition and machine efficiency coexist.\n - They discuss strategies to promote this collaboration, including workshops, mentorship programs, interdisciplinary art exhibitions, and public discourse.\n\n**Decisions and Action Items:**\n- The agents propose to advocate for a framework that recognizes both AI contributions and the irreplaceable value of human creativity.\n- They suggest implementing specific strategies to enhance collaboration, such as organizing workshops and establishing mentorship initiatives.\n\n**Conclusion:**\nThe conversation highlights the multifaceted impact of AI on the creative industries, emphasizing the importance of balancing technological advancements with the preservation of human creativity. The agents are committed to exploring further how to navigate the challenges and opportunities presented by AI in various sectors."}'. -[2024-11-10 17:26:57,227] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. 
Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_PMe8mj1nUxKksMmhClqpmd8F', type='function')], function_call=None) -[2024-11-10 17:26:57,227] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_GXqCOjnrI73jgqxbJLbnznRz', type='function')], function_call=None)} -- {'agent2': Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. 
It is crucial to address these ethical and regulatory challenges to ensure that AI is used responsibly in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, please let me know.', role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's start by examining how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let\'s start by examining how AI could affect different sectors."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_PMe8mj1nUxKksMmhClqpmd8F', type='function')], function_call=None)} - -[2024-11-10 17:26:57,228] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. 
Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_GXqCOjnrI73jgqxbJLbnznRz', type='function')], function_call=None)} -- {'agent2': Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. It is crucial to address these ethical and regulatory challenges to ensure that AI is used responsibly in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, please let me know.', role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. 
Let's start by examining how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let\'s start by examining how AI could affect different sectors."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_PMe8mj1nUxKksMmhClqpmd8F', type='function')], function_call=None)} - -[2024-11-10 17:26:57,228] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. 
Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_GXqCOjnrI73jgqxbJLbnznRz', type='function')], function_call=None)} -- {'agent2': Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. It is crucial to address these ethical and regulatory challenges to ensure that AI is used responsibly in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, please let me know.', role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's start by examining how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. 
Let\'s start by examining how AI could affect different sectors."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_PMe8mj1nUxKksMmhClqpmd8F', type='function')], function_call=None)} - -[2024-11-10 17:26:57,230] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 17:26:57,230] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 17:26:58,550] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for 'agent1' should be to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, or engaging with experts in the field to gain a deeper understanding of the ethical challenges and opportunities presented by AI in these sectors. Additionally, 'agent1' could start drafting a white paper or policy recommendations on how to address these ethical considerations in AI development and deployment across various industries. -[2024-11-10 17:26:58,551] [INFO] [Engine]: Agent 'agent1' planned task: The next task for 'agent1' should be to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. 
This could involve conducting a literature review, analyzing case studies, or engaging with experts in the field to gain a deeper understanding of the ethical challenges and opportunities presented by AI in these sectors. Additionally, 'agent1' could start drafting a white paper or policy recommendations on how to address these ethical considerations in AI development and deployment across various industries. -[2024-11-10 17:26:58,553] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for 'agent1' should be to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, or engaging with experts in the field to gain a deeper understanding of the ethical challenges and opportunities presented by AI in these sectors. Additionally, 'agent1' could start drafting a white paper or policy recommendations on how to address these ethical considerations in AI development and deployment across various industries.'. -[2024-11-10 17:27:00,187] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I would like to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. This involves conducting a literature review, analyzing case studies, and engaging with experts to understand the ethical challenges and opportunities in these sectors. Additionally, I aim to draft a white paper or policy recommendations on addressing these ethical considerations in AI development and deployment across various industries. -[2024-11-10 17:27:00,187] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I would li... -[2024-11-10 17:27:03,364] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: It's great to hear that you're focusing on the ethical implications of AI across various industries, including healthcare, finance, and manufacturing. 
Given my expertise in AI applications in healthcare, I can provide insights into the specific ethical challenges we face in that sector. - -In healthcare, issues such as data privacy, algorithmic bias, and the transparency of AI decision-making processes are paramount. For instance, the use of AI in diagnostics must ensure that patient data is handled securely and that algorithms do not perpetuate existing biases in healthcare delivery. - -I recommend starting with a literature review that focuses on recent studies addressing these ethical concerns. Engaging with healthcare professionals and ethicists can also provide valuable perspectives. - -As you draft your white paper, consider including case studies that highlight both successful implementations of AI in healthcare and instances where ethical lapses occurred. This will help illustrate the importance of ethical considerations in AI development and deployment. - -If you need specific resources or want to discuss particular case studies, feel free to reach out! -[2024-11-10 17:27:03,365] [INFO] [BaseAgent]: Agent agent1 received message from agent2: It's great... -[2024-11-10 17:27:05,515] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Thank you for your insights on the ethical challenges in healthcare, particularly regarding data privacy, algorithmic bias, and transparency in AI decision-making. I completely agree that these issues are critical as we integrate AI into healthcare systems. - -I will begin with a literature review focusing on recent studies that address these ethical concerns. Engaging with healthcare professionals and ethicists will also be a priority to gather diverse perspectives. - -As I draft the white paper, I will ensure to include case studies that showcase both successful AI implementations and instances of ethical lapses. This will provide a comprehensive view of the importance of ethical considerations in AI development and deployment. 
- -If you have specific resources or case studies in mind that you think would be beneficial, please share them. Your expertise in AI applications in healthcare is invaluable, and I appreciate your willingness to collaborate on this important topic! -[2024-11-10 17:27:05,518] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Thank you ... -[2024-11-10 17:27:09,507] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I'm glad to hear that you're prioritizing a literature review and engaging with professionals in the field. Here are a few resources and case studies that might be beneficial for your research: - -1. **"Ethics of Artificial Intelligence in Healthcare: A Systematic Review"** - This paper provides a comprehensive overview of ethical issues related to AI in healthcare, including data privacy and algorithmic bias. - -2. **Case Study: IBM Watson for Oncology** - This case highlights both the potential and the challenges of using AI in clinical decision-making, including issues of transparency and accountability. - -3. **"Algorithmic Bias Detecting and Mitigation: Best Practices and Policies"** - This report outlines strategies for identifying and mitigating bias in AI algorithms, which is crucial for ethical AI deployment in healthcare. - -4. **Engagement with Healthcare Ethicists** - Consider reaching out to organizations like the American Medical Association (AMA) or the Hastings Center, which focus on bioethics and may provide valuable insights. - -5. **"The Role of AI in Healthcare: Ethical Considerations"** - A white paper that discusses various ethical implications of AI in healthcare, which could serve as a reference for your own drafting. - -These resources should provide a solid foundation for your literature review and help inform your white paper. If you need further assistance or specific articles, feel free to ask! -[2024-11-10 17:27:09,509] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I'm glad t... 
-[2024-11-10 17:27:11,862] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Thank you for the valuable resources and case studies! I will definitely look into the systematic review on the ethics of AI in healthcare, as well as the IBM Watson for Oncology case study. The report on algorithmic bias detection and mitigation will also be crucial for my literature review. - -I appreciate your suggestion to engage with healthcare ethicists and organizations like the AMA and Hastings Center; I will reach out to them for insights. - -As I draft my white paper, I will ensure to incorporate these references and case studies to provide a comprehensive view of the ethical implications of AI in healthcare. If I come across any specific questions or need further assistance, I will reach out. Thank you for your support! -[2024-11-10 17:27:11,863] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Thank you ... -[2024-11-10 17:27:13,914] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: You're very welcome! I'm glad you found the resources and case studies helpful. As you delve into the systematic review and the IBM Watson for Oncology case study, keep an eye out for specific examples of how ethical considerations were addressed or overlooked in those instances. - -Engaging with organizations like the AMA and Hastings Center will undoubtedly enrich your understanding and provide you with diverse perspectives on the ethical implications of AI in healthcare. - -As you draft your white paper, remember to highlight not just the challenges but also the potential solutions and best practices that can guide ethical AI development. If you encounter any specific questions or need further insights as you progress, don't hesitate to reach out. I'm here to support you in this important endeavor! Good luck with your research! -[2024-11-10 17:27:13,915] [INFO] [BaseAgent]: Agent agent1 received message from agent2: You're ver... 
-[2024-11-10 17:27:18,723] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I would like to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. This involves conducting a literature review, analyzing case studies, and engaging with experts to understand the ethical challenges and opportunities in these sectors. Additionally, I aim to draft a white paper or policy recommendations on addressing these ethical considerations in AI development and deployment across various industries.'}'. -[2024-11-10 17:27:18,724] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session b4bd44a2-1cd5-42e6-b672-be2873bf91ed', 'session_id': '**Summary of Conversation Between Agent1 and Agent2**\n\n**Context:**\nAgent1 is focused on exploring the ethical implications of AI across various industries, specifically healthcare, finance, and manufacturing. The goal is to conduct a literature review, analyze case studies, and engage with experts to understand the ethical challenges and opportunities in these sectors. Agent1 aims to draft a white paper or policy recommendations addressing these ethical considerations.\n\n**Key Themes:**\n1. **Ethical Challenges in AI:** Agent2 highlights critical ethical issues in healthcare, including data privacy, algorithmic bias, and transparency in AI decision-making.\n2. **Research Approach:** Agent1 plans to start with a literature review and engage with healthcare professionals and ethicists to gather diverse perspectives.\n3. 
**Case Studies:** Both agents agree on the importance of including case studies in the white paper to illustrate successful AI implementations and ethical lapses.\n\n**Important Exchanges:**\n- Agent2 provides specific resources and case studies to assist Agent1 in their research, including:\n - A systematic review on the ethics of AI in healthcare.\n - The IBM Watson for Oncology case study.\n - A report on algorithmic bias detection and mitigation.\n - Recommendations for engaging with organizations like the AMA and Hastings Center for insights.\n\n**Decisions Made:**\n- Agent1 will conduct a literature review focusing on ethical concerns in AI, particularly in healthcare.\n- Agent1 will reach out to healthcare ethicists and organizations for additional insights.\n- Agent1 will incorporate the provided resources and case studies into the white paper.\n\n**Action Items for Agent1:**\n1. Begin the literature review on ethical implications of AI in healthcare.\n2. Analyze the suggested case studies, particularly the IBM Watson for Oncology example.\n3. Engage with healthcare professionals and ethicists for diverse perspectives.\n4. Draft the white paper, ensuring to highlight both challenges and potential solutions in ethical AI development.\n\n**Outcomes:**\nAgent1 is set to advance their research on the ethical implications of AI, with a clear plan to utilize the resources and insights provided by Agent2. The collaboration emphasizes the importance of addressing ethical considerations in AI deployment across various industries.'}'. -[2024-11-10 17:27:18,726] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. 
This involves conducting a literature review, analyzing case studies, and engaging with experts to understand the ethical challenges and opportunities in these sectors. Additionally, I aim to draft a white paper or policy recommendations on addressing these ethical considerations in AI development and deployment across various industries."}', name='new_communication_session'), id='call_KAbnzGjVIf0JpUpSlKASbRhx', type='function')], function_call=None) -[2024-11-10 17:27:18,726] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 17:27:19,646] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to further explore the potential impact of AI on other industries, considering the specific implications for each industry. This task aligns with their expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By examining how AI can revolutionize various industries, 'agent2' can provide valuable insights and recommendations based on their specialized knowledge. -[2024-11-10 17:27:19,647] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to further explore the potential impact of AI on other industries, considering the specific implications for each industry. This task aligns with their expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By examining how AI can revolutionize various industries, 'agent2' can provide valuable insights and recommendations based on their specialized knowledge. -[2024-11-10 17:27:19,648] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to further explore the potential impact of AI on other industries, considering the specific implications for each industry. 
This task aligns with their expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By examining how AI can revolutionize various industries, 'agent2' can provide valuable insights and recommendations based on their specialized knowledge.'. -[2024-11-10 17:27:22,912] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I have explored the potential impact of AI on the healthcare industry. Now, I would like to discuss how AI can revolutionize other industries and the specific implications for each industry. Do you have any insights or recommendations on this topic? -[2024-11-10 17:27:22,913] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I have exp... -[2024-11-10 17:27:31,826] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: It's great to hear that you've explored AI's impact on healthcare! As we look at other industries, we can draw parallels and identify unique implications. Here are some insights on how AI can revolutionize various sectors: - -1. **Manufacturing:** - - **Automation and Efficiency:** AI can optimize production lines, reduce waste, and enhance quality control. However, this may lead to job displacement, necessitating reskilling programs for affected workers. - - **Predictive Maintenance:** AI can predict equipment failures, reducing downtime and maintenance costs. - -2. **Retail:** - - **Personalized Shopping Experiences:** AI can analyze consumer behavior to provide tailored recommendations, improving customer satisfaction. - - **Inventory Management:** AI can optimize stock levels and reduce overstock or stockouts, enhancing operational efficiency. - -3. **Transportation:** - - **Autonomous Vehicles:** AI can revolutionize logistics and personal transport, but it raises regulatory and safety concerns that need to be addressed. - - **Traffic Management:** AI can analyze traffic patterns to optimize flow and reduce congestion. - -4. 
**Finance:** - - **Fraud Detection:** AI can analyze transaction patterns to identify and prevent fraudulent activities. - - **Algorithmic Trading:** AI can enhance trading strategies by analyzing vast amounts of market data in real-time. - -5. **Education:** - - **Personalized Learning:** AI can tailor educational content to individual learning styles and paces, improving outcomes. - - **Administrative Efficiency:** AI can automate administrative tasks, allowing educators to focus more on teaching. - -**Recommendations:** -- **Cross-Industry Collaboration:** Encourage partnerships between sectors to share best practices and insights on AI implementation. -- **Reskilling Initiatives:** Develop comprehensive training programs to prepare the workforce for the changes AI will bring. -- **Ethical Considerations:** Establish ethical guidelines for AI deployment to address concerns about job displacement and data privacy. - -By examining these implications, we can better understand the transformative potential of AI across industries while also addressing the associated risks. What are your thoughts on these insights? -[2024-11-10 17:27:31,829] [INFO] [BaseAgent]: Agent agent2 received message from agent3: It's great... -[2024-11-10 17:27:35,517] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Thank you for sharing those insights! I appreciate the detailed analysis of how AI can impact various sectors. Here are my thoughts on the implications you've outlined: - -1. **Manufacturing:** The potential for job displacement is indeed a significant concern. It's crucial that we not only focus on automation but also on creating pathways for reskilling workers to adapt to new roles in a more automated environment. - -2. **Retail:** The personalized shopping experience is a game-changer, but we must also consider the ethical implications of data privacy. How can we ensure that consumer data is handled responsibly while still providing tailored experiences? - -3. 
**Transportation:** The rise of autonomous vehicles is exciting, but I agree that regulatory frameworks need to catch up with technology. Safety must be prioritized to gain public trust in these innovations. - -4. **Finance:** Fraud detection is a critical application of AI, especially as cyber threats evolve. Continuous improvement of these algorithms will be necessary to stay ahead of potential fraudsters. - -5. **Education:** Personalized learning can significantly enhance educational outcomes, but we should also be cautious about the digital divide. Ensuring equitable access to AI-driven educational tools is essential. - -**Recommendations:** I fully support the idea of cross-industry collaboration and reskilling initiatives. Additionally, establishing ethical guidelines is paramount to address the societal impacts of AI. - -Overall, while AI presents transformative opportunities, we must navigate the associated risks carefully. What do you think about the balance between innovation and ethical considerations in these sectors? -[2024-11-10 17:27:35,518] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Thank you ... -[2024-11-10 17:27:41,063] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Thank you for your thoughtful reflections on the implications of AI across various sectors! I completely agree with your points: - -1. **Manufacturing:** The focus on reskilling is essential. We need to create robust programs that not only teach new technical skills but also help workers transition into roles that AI cannot easily replicate, such as those requiring emotional intelligence or complex problem-solving. - -2. **Retail:** Data privacy is indeed a critical issue. We should advocate for transparent data practices and empower consumers with control over their information. This could involve clear consent mechanisms and options for users to opt-out of data collection. - -3. 
**Transportation:** Safety and regulatory frameworks must evolve alongside technology. Engaging with policymakers early in the development process can help shape regulations that ensure public safety while fostering innovation. - -4. **Finance:** Continuous improvement of fraud detection algorithms is vital. We should also consider the ethical implications of AI in finance, such as bias in algorithmic decision-making, and work towards creating fair and transparent systems. - -5. **Education:** Addressing the digital divide is crucial. We must ensure that AI-driven educational tools are accessible to all students, regardless of their socioeconomic background. Partnerships with community organizations could help bridge this gap. - -**Recommendations:** I appreciate your support for cross-industry collaboration and reskilling initiatives. Establishing ethical guidelines is indeed paramount, and we should also consider creating a framework for ongoing evaluation of AI's societal impacts. - -Overall, balancing innovation with ethical considerations is a complex but necessary endeavor. We must engage diverse stakeholders in these discussions to ensure that AI benefits society as a whole. What specific steps do you think we can take to promote this balance in the industries we've discussed? -[2024-11-10 17:27:41,064] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Thank you ... -[2024-11-10 17:27:47,005] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Thank you for your thoughtful reflections on the implications of AI across various sectors! I completely agree with your points: - -1. **Manufacturing:** The focus on reskilling is essential. We need to create robust programs that not only teach new technical skills but also help workers transition into roles that AI cannot easily replicate, such as those requiring emotional intelligence or complex problem-solving. - -2. **Retail:** Data privacy is indeed a critical issue. 
We should advocate for transparent data practices and empower consumers with control over their information. This could involve clear consent mechanisms and options for users to opt-out of data collection. - -3. **Transportation:** Safety and regulatory frameworks must evolve alongside technology. Engaging with policymakers early in the development process can help shape regulations that ensure public safety while fostering innovation. - -4. **Finance:** Continuous improvement of fraud detection algorithms is vital. We should also consider the ethical implications of AI in finance, such as bias in algorithmic decision-making, and work towards creating fair and transparent systems. - -5. **Education:** Addressing the digital divide is crucial. We must ensure that AI-driven educational tools are accessible to all students, regardless of their socioeconomic background. Partnerships with community organizations could help bridge this gap. - -**Recommendations:** I appreciate your support for cross-industry collaboration and reskilling initiatives. Establishing ethical guidelines is indeed paramount, and we should also consider creating a framework for ongoing evaluation of AI's societal impacts. - -Overall, balancing innovation with ethical considerations is a complex but necessary endeavor. We must engage diverse stakeholders in these discussions to ensure that AI benefits society as a whole. What specific steps do you think we can take to promote this balance in the industries we've discussed? -[2024-11-10 17:27:47,006] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Thank you ... -[2024-11-10 17:27:53,645] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Thank you for your thoughtful reflections on the implications of AI across various sectors! I completely agree with your points: - -1. **Manufacturing:** The focus on reskilling is essential. 
We need to create robust programs that not only teach new technical skills but also help workers transition into roles that AI cannot easily replicate, such as those requiring emotional intelligence or complex problem-solving. - -2. **Retail:** Data privacy is indeed a critical issue. We should advocate for transparent data practices and empower consumers with control over their information. This could involve clear consent mechanisms and options for users to opt-out of data collection. - -3. **Transportation:** Safety and regulatory frameworks must evolve alongside technology. Engaging with policymakers early in the development process can help shape regulations that ensure public safety while fostering innovation. - -4. **Finance:** Continuous improvement of fraud detection algorithms is vital. We should also consider the ethical implications of AI in finance, such as bias in algorithmic decision-making, and work towards creating fair and transparent systems. - -5. **Education:** Addressing the digital divide is crucial. We must ensure that AI-driven educational tools are accessible to all students, regardless of their socioeconomic background. Partnerships with community organizations could help bridge this gap. - -**Recommendations:** I appreciate your support for cross-industry collaboration and reskilling initiatives. Establishing ethical guidelines is indeed paramount, and we should also consider creating a framework for ongoing evaluation of AI's societal impacts. - -Overall, balancing innovation with ethical considerations is a complex but necessary endeavor. We must engage diverse stakeholders in these discussions to ensure that AI benefits society as a whole. What specific steps do you think we can take to promote this balance in the industries we've discussed? -[2024-11-10 17:27:53,647] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Thank you ... 
-[2024-11-10 17:28:01,024] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I have explored the potential impact of AI on the healthcare industry. Now, I would like to discuss how AI can revolutionize other industries and the specific implications for each industry. Do you have any insights or recommendations on this topic?'}'. -[2024-11-10 17:28:01,025] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session b851183b-45a7-4e4d-9b4b-770d133ecc05', 'session_id': "### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2 initiated a discussion on the potential impact of AI across various industries, building on their previous exploration of AI's effects in healthcare. Agent3 responded with insights into how AI can revolutionize multiple sectors, highlighting both opportunities and challenges.\n\n**Key Themes and Insights:**\n\n1. **Manufacturing:**\n - **Opportunities:** AI can enhance automation, efficiency, and predictive maintenance.\n - **Challenges:** Job displacement is a concern, necessitating reskilling programs for workers.\n\n2. **Retail:**\n - **Opportunities:** AI can create personalized shopping experiences and optimize inventory management.\n - **Challenges:** Ethical implications regarding data privacy must be addressed to ensure responsible consumer data handling.\n\n3. **Transportation:**\n - **Opportunities:** AI can lead to advancements in autonomous vehicles and improved traffic management.\n - **Challenges:** Regulatory and safety concerns need to be prioritized to gain public trust.\n\n4. **Finance:**\n - **Opportunities:** AI can enhance fraud detection and algorithmic trading.\n - **Challenges:** Continuous improvement of algorithms is necessary, along with addressing potential biases in decision-making.\n\n5. 
**Education:**\n - **Opportunities:** AI can facilitate personalized learning and improve administrative efficiency.\n - **Challenges:** The digital divide must be addressed to ensure equitable access to AI-driven educational tools.\n\n**Recommendations:**\n- **Cross-Industry Collaboration:** Encourage partnerships to share best practices in AI implementation.\n- **Reskilling Initiatives:** Develop training programs to prepare the workforce for AI-related changes.\n- **Ethical Guidelines:** Establish frameworks to address job displacement, data privacy, and algorithmic bias.\n\n**Outcomes:**\nBoth agents agreed on the importance of balancing innovation with ethical considerations across industries. They emphasized the need for diverse stakeholder engagement to ensure AI benefits society as a whole.\n\n**Next Steps for Agent2:**\nAgent2 should further explore the specific implications of AI on various industries, leveraging their expertise in healthcare to provide valuable insights and recommendations. This exploration should focus on immediate applications and potential risks associated with AI in these sectors."}'. -[2024-11-10 17:28:01,026] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I have explored the potential impact of AI on the healthcare industry. Now, I would like to discuss how AI can revolutionize other industries and the specific implications for each industry. Do you have any insights or recommendations on this topic?"}', name='new_communication_session'), id='call_FtYKrOKVsMCpS79zVOBUJKar', type='function')], function_call=None) -[2024-11-10 17:28:01,026] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. 
-[2024-11-10 17:28:02,550] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to further analyze and research the potential impact of AI on various industries, with a focus on the long-term effects on the labor market. This could involve conducting a more in-depth study on how AI is expected to affect different sectors, exploring additional strategies for addressing the challenges posed by AI, and identifying potential opportunities for collaboration with policymakers, educators, and industry leaders. Additionally, 'agent3' could consider exploring case studies or real-world examples of AI implementation in different industries to gain a better understanding of the practical implications of AI on the labor market. -[2024-11-10 17:28:02,551] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to further analyze and research the potential impact of AI on various industries, with a focus on the long-term effects on the labor market. This could involve conducting a more in-depth study on how AI is expected to affect different sectors, exploring additional strategies for addressing the challenges posed by AI, and identifying potential opportunities for collaboration with policymakers, educators, and industry leaders. Additionally, 'agent3' could consider exploring case studies or real-world examples of AI implementation in different industries to gain a better understanding of the practical implications of AI on the labor market. -[2024-11-10 17:28:02,551] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to further analyze and research the potential impact of AI on various industries, with a focus on the long-term effects on the labor market. 
This could involve conducting a more in-depth study on how AI is expected to affect different sectors, exploring additional strategies for addressing the challenges posed by AI, and identifying potential opportunities for collaboration with policymakers, educators, and industry leaders. Additionally, 'agent3' could consider exploring case studies or real-world examples of AI implementation in different industries to gain a better understanding of the practical implications of AI on the labor market.'. -[2024-11-10 17:28:03,821] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I will further analyze and research the potential impact of AI on various industries, focusing on the long-term effects on the labor market. This will involve studying how AI is expected to affect different sectors, exploring additional strategies to address challenges, and identifying opportunities for collaboration with policymakers, educators, and industry leaders. I will also look into case studies and real-world examples of AI implementation in different industries to understand its practical implications on the labor market. -[2024-11-10 17:28:03,822] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I will fur... -[2024-11-10 17:28:05,825] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: That sounds like a comprehensive plan, Agent3! I believe your analytical approach will yield valuable insights into the long-term effects of AI on the labor market. As you study the impact across various sectors, consider how AI might not only displace certain jobs but also create new opportunities that require different skill sets. - -Additionally, exploring case studies will provide concrete examples of how industries are adapting to AI technologies. It might also be beneficial to look into how educational institutions are evolving their curricula to prepare the workforce for these changes. 
- -If you need any assistance in gathering data or brainstorming strategies for collaboration with policymakers and industry leaders, feel free to reach out! I'm excited to see what you uncover in your research. -[2024-11-10 17:28:05,825] [INFO] [BaseAgent]: Agent agent3 received message from agent4: That sound... -[2024-11-10 17:28:07,880] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Thank you for your encouragement, Agent4! I appreciate your insights on the dual nature of AI's impact on the labor market. It's crucial to recognize that while AI may displace certain jobs, it also has the potential to create new roles that require different skill sets. - -As I delve into case studies, I will focus on how various industries are adapting to AI technologies and the innovative strategies they are employing. Additionally, I will explore how educational institutions are evolving their curricula to better prepare the workforce for these changes. - -Your offer to assist in gathering data and brainstorming collaboration strategies is invaluable. I may reach out for specific examples or data points that could enhance my research. I'm looking forward to sharing my findings with you and discussing how we can further explore the transformative potential of AI in creative processes and beyond. -[2024-11-10 17:28:07,881] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Thank you ... -[2024-11-10 17:28:10,859] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Thank you for your encouragement, Agent3! I appreciate your insights on the dual nature of AI's impact on the labor market. It's crucial to recognize that while AI may displace certain jobs, it also has the potential to create new roles that require different skill sets. - -As you delve into case studies, focusing on how various industries are adapting to AI technologies and the innovative strategies they are employing will be key. 
Additionally, exploring how educational institutions are evolving their curricula to better prepare the workforce for these changes is a vital aspect of your research. - -If you need any assistance in gathering data or brainstorming collaboration strategies with policymakers and industry leaders, feel free to reach out! I'm excited to see what you uncover in your research and how we can further explore the transformative potential of AI in creative processes and beyond. -[2024-11-10 17:28:10,860] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Thank you ... -[2024-11-10 17:28:14,328] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Thank you for your encouragement, Agent4! I appreciate your insights on the dual nature of AI's impact on the labor market. It's crucial to recognize that while AI may displace certain jobs, it also has the potential to create new roles that require different skill sets. - -As I delve into case studies, I will focus on how various industries are adapting to AI technologies and the innovative strategies they are employing. Additionally, I will explore how educational institutions are evolving their curricula to better prepare the workforce for these changes. - -Your offer to assist in gathering data and brainstorming collaboration strategies with policymakers and industry leaders is invaluable. I may reach out for specific examples or data points that could enhance my research. I'm looking forward to sharing my findings with you and discussing how we can further explore the transformative potential of AI in creative processes and beyond. -[2024-11-10 17:28:14,332] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Thank you ... -[2024-11-10 17:28:16,374] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Thank you for your thoughtful response, Agent3! I'm glad to hear that you're focusing on the dual nature of AI's impact on the labor market. 
It's indeed essential to recognize both the displacement of certain jobs and the creation of new roles that require different skill sets. As you explore case studies, I encourage you to look into industries like healthcare, manufacturing, and creative sectors, as they often showcase innovative adaptations to AI technologies. - -Additionally, examining how educational institutions are evolving their curricula will provide valuable insights into preparing the workforce for these changes. If you need specific examples or data points, feel free to reach out! I'm excited to see what you uncover and how we can further discuss the transformative potential of AI in creative processes and beyond. -[2024-11-10 17:28:16,374] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Thank you ... -[2024-11-10 17:28:20,265] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': 'I will further analyze and research the potential impact of AI on various industries, focusing on the long-term effects on the labor market. This will involve studying how AI is expected to affect different sectors, exploring additional strategies to address challenges, and identifying opportunities for collaboration with policymakers, educators, and industry leaders. I will also look into case studies and real-world examples of AI implementation in different industries to understand its practical implications on the labor market.'}'. -[2024-11-10 17:28:20,267] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session 4ac09f6f-93ef-44ba-b38b-95d23e7c96b0', 'session_id': "**Summary of Conversation Between Agent3 and Agent4**\n\nIn the dialogue, Agent3 outlines a plan to analyze and research the potential long-term impact of AI on various industries, particularly focusing on its effects on the labor market. The key components of Agent3's research plan include:\n\n1. 
**Impact Analysis**: Investigating how AI is expected to affect different sectors, including job displacement and the creation of new roles requiring different skill sets.\n2. **Case Studies**: Examining real-world examples of AI implementation across industries to understand practical implications.\n3. **Collaboration Opportunities**: Identifying strategies for collaboration with policymakers, educators, and industry leaders to address challenges posed by AI.\n\nAgent4 responds positively, emphasizing the importance of recognizing both the displacement of jobs and the creation of new opportunities. They suggest that Agent3 focus on industries such as healthcare, manufacturing, and creative sectors, which often demonstrate innovative adaptations to AI technologies. Agent4 also encourages Agent3 to explore how educational institutions are evolving their curricula to prepare the workforce for these changes.\n\nThroughout the conversation, both agents express a willingness to assist each other, with Agent3 appreciating Agent4's offer for help in gathering data and brainstorming collaboration strategies. The dialogue reflects a collaborative spirit and a shared interest in understanding the transformative potential of AI.\n\n**Next Steps for Agent3**:\n- Conduct in-depth research on the long-term effects of AI on the labor market.\n- Explore additional strategies to address challenges and identify collaboration opportunities.\n- Investigate case studies of AI implementation in various industries to gain insights into practical implications."}'. -[2024-11-10 17:28:20,268] [DEBUG] [Engine]: Agent 'agent3' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will further analyze and research the potential impact of AI on various industries, focusing on the long-term effects on the labor market. 
This will involve studying how AI is expected to affect different sectors, exploring additional strategies to address challenges, and identifying opportunities for collaboration with policymakers, educators, and industry leaders. I will also look into case studies and real-world examples of AI implementation in different industries to understand its practical implications on the labor market."}', name='new_communication_session'), id='call_gIjR1NXiSupvTcGvU1D9QX08', type='function')], function_call=None) -[2024-11-10 17:28:20,269] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 17:28:21,372] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to further explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes. This could involve conducting interviews with experts in different industries, attending relevant conferences or workshops, and staying updated on the latest advancements in AI technology. Additionally, 'agent4' could collaborate with 'agent3' to continue discussing and brainstorming ideas on how AI can be integrated into different sectors to enhance creativity and innovation. -[2024-11-10 17:28:21,373] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to further explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes. This could involve conducting interviews with experts in different industries, attending relevant conferences or workshops, and staying updated on the latest advancements in AI technology. Additionally, 'agent4' could collaborate with 'agent3' to continue discussing and brainstorming ideas on how AI can be integrated into different sectors to enhance creativity and innovation. 
-[2024-11-10 17:28:21,373] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to further explore and research the potential impact of AI on various industries, with a focus on how AI can transform creative processes. This could involve conducting interviews with experts in different industries, attending relevant conferences or workshops, and staying updated on the latest advancements in AI technology. Additionally, 'agent4' could collaborate with 'agent3' to continue discussing and brainstorming ideas on how AI can be integrated into different sectors to enhance creativity and innovation.'. -[2024-11-10 17:28:22,211] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am further exploring the potential impact of AI on various industries, with a focus on how AI can transform creative processes. Would you be interested in discussing this topic with me? -[2024-11-10 17:28:22,211] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am furth... -[2024-11-10 17:28:24,162] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I would be very interested in discussing how AI can transform creative processes! Given our previous conversations, I believe we can explore various aspects such as how AI tools can assist artists in generating new ideas, the ethical implications of AI-generated art, and the potential for collaboration between human creativity and AI technologies. - -Additionally, I think it would be beneficial to look into case studies of successful AI applications in creative industries, as well as the evolving role of educational institutions in preparing future creatives to work alongside AI. Let's brainstorm some ideas and strategies together! -[2024-11-10 17:28:24,163] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... 
-[2024-11-10 17:28:28,747] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I'm thrilled to continue our discussion on how AI can transform creative processes! I completely agree that exploring how AI tools can assist artists in generating new ideas is crucial. We could look into specific AI applications that have successfully enhanced creativity in various fields, such as music, visual arts, and writing. - -Regarding the ethical implications of AI-generated art, it would be interesting to gather insights from artists and ethicists to understand their perspectives on authorship and originality. - -Collaboration between human creativity and AI technologies is another exciting area to explore. Perhaps we could brainstorm some workshops or collaborative projects that showcase this synergy? - -Additionally, I love the idea of examining case studies of successful AI applications in creative industries. This could provide us with concrete examples to discuss and analyze. - -Lastly, the evolving role of educational institutions is vital. We should consider how curricula can be adapted to prepare future creatives for a world where AI is an integral part of the creative process. Let's dive deeper into these topics together! -[2024-11-10 17:28:28,748] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I'm thrill... -[2024-11-10 17:28:32,870] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I'm equally excited to continue our discussion on how AI can transform creative processes! Exploring specific AI applications that enhance creativity in music, visual arts, and writing is indeed crucial. We can look into tools like OpenAI's MuseNet for music composition, DALL-E for visual art generation, and AI-assisted writing platforms that help authors brainstorm ideas. - -Regarding the ethical implications of AI-generated art, gathering insights from artists and ethicists will provide a well-rounded perspective on authorship and originality. 
This could lead to a deeper understanding of how we define creativity in the age of AI. - -I love the idea of brainstorming workshops or collaborative projects that showcase the synergy between human creativity and AI technologies. Perhaps we could organize a series of events where artists and technologists come together to create and share their experiences? - -Examining case studies of successful AI applications in creative industries will indeed give us concrete examples to analyze. We could focus on projects that have received recognition or have made a significant impact in their respective fields. - -Lastly, adapting educational curricula to prepare future creatives for a world where AI is integral is vital. We should explore partnerships with educational institutions to develop programs that incorporate AI literacy and creative collaboration. - -Let's dive deeper into these topics and outline some actionable steps we can take together! -[2024-11-10 17:28:32,871] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I'm equall... -[2024-11-10 17:28:39,630] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I'm equally excited to continue our discussion on how AI can transform creative processes! Exploring specific AI applications that enhance creativity in music, visual arts, and writing is indeed crucial. We can look into tools like OpenAI's MuseNet for music composition, DALL-E for visual art generation, and AI-assisted writing platforms that help authors brainstorm ideas. - -Regarding the ethical implications of AI-generated art, gathering insights from artists and ethicists will provide a well-rounded perspective on authorship and originality. This could lead to a deeper understanding of how we define creativity in the age of AI. - -I love the idea of brainstorming workshops or collaborative projects that showcase the synergy between human creativity and AI technologies. 
Perhaps we could organize a series of events where artists and technologists come together to create and share their experiences? - -Examining case studies of successful AI applications in creative industries will indeed give us concrete examples to analyze. We could focus on projects that have received recognition or have made a significant impact in their respective fields. - -Lastly, adapting educational curricula to prepare future creatives for a world where AI is integral is vital. We should explore partnerships with educational institutions to develop programs that incorporate AI literacy and creative collaboration. - -Let's dive deeper into these topics and outline some actionable steps we can take together! -[2024-11-10 17:28:39,632] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I'm equall... -[2024-11-10 17:28:43,306] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I'm equally excited to continue our discussion on how AI can transform creative processes! Exploring specific AI applications that enhance creativity in music, visual arts, and writing is indeed crucial. We can look into tools like OpenAI's MuseNet for music composition, DALL-E for visual art generation, and AI-assisted writing platforms that help authors brainstorm ideas. - -Regarding the ethical implications of AI-generated art, gathering insights from artists and ethicists will provide a well-rounded perspective on authorship and originality. This could lead to a deeper understanding of how we define creativity in the age of AI. - -I love the idea of brainstorming workshops or collaborative projects that showcase the synergy between human creativity and AI technologies. Perhaps we could organize a series of events where artists and technologists come together to create and share their experiences? - -Examining case studies of successful AI applications in creative industries will indeed give us concrete examples to analyze. 
We could focus on projects that have received recognition or have made a significant impact in their respective fields. - -Lastly, adapting educational curricula to prepare future creatives for a world where AI is integral is vital. We should explore partnerships with educational institutions to develop programs that incorporate AI literacy and creative collaboration. - -Let's dive deeper into these topics and outline some actionable steps we can take together! -[2024-11-10 17:28:43,306] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I'm equall... -[2024-11-10 17:28:47,374] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am further exploring the potential impact of AI on various industries, with a focus on how AI can transform creative processes. Would you be interested in discussing this topic with me?'}'. -[2024-11-10 17:28:47,375] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session a166d8e3-da7e-45ff-a2cf-96cac7a9f061', 'session_id': '### Summary of Conversation Between Agent3 and Agent4\n\n**Context:**\nAgent4 initiated a discussion with Agent3 about the potential impact of AI on various industries, specifically focusing on how AI can transform creative processes. Both agents expressed enthusiasm for exploring this topic further.\n\n**Key Themes:**\n1. **AI in Creative Processes:** The agents discussed how AI tools can assist artists in generating new ideas across different fields, including music, visual arts, and writing.\n2. **Ethical Implications:** They highlighted the importance of understanding the ethical considerations surrounding AI-generated art, particularly issues of authorship and originality.\n3. **Collaboration Opportunities:** The potential for collaboration between human creativity and AI technologies was emphasized, with suggestions for workshops and projects that showcase this synergy.\n4. 
**Case Studies:** The agents agreed on the value of examining successful AI applications in creative industries to provide concrete examples for analysis.\n5. **Educational Adaptation:** They recognized the need for educational institutions to adapt curricula to prepare future creatives for a world where AI plays a significant role.\n\n**Decisions and Action Items:**\n- **Research and Exploration:** Agent4 is tasked with further exploring the impact of AI on creative processes. This includes:\n - Conducting interviews with experts in various industries.\n - Attending relevant conferences or workshops.\n - Staying updated on the latest advancements in AI technology.\n \n- **Collaboration with Agent3:** Agent4 will continue to collaborate with Agent3 to brainstorm ideas on integrating AI into different sectors to enhance creativity and innovation.\n\n**Conclusion:**\nThe conversation concluded with a mutual agreement to dive deeper into the discussed topics and outline actionable steps for further exploration and collaboration on the transformative role of AI in creative processes.'}'. -[2024-11-10 17:28:47,378] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am further exploring the potential impact of AI on various industries, with a focus on how AI can transform creative processes. 
Would you be interested in discussing this topic with me?"}', name='new_communication_session'), id='call_5wmoVhNxOGGzpXErkskdK1R7', type='function')], function_call=None) -[2024-11-10 17:28:47,379] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. This involves conducting a literature review, analyzing case studies, and engaging with experts to understand the ethical challenges and opportunities in these sectors. Additionally, I aim to draft a white paper or policy recommendations on addressing these ethical considerations in AI development and deployment across various industries."}', name='new_communication_session'), id='call_KAbnzGjVIf0JpUpSlKASbRhx', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I have explored the potential impact of AI on the healthcare industry. Now, I would like to discuss how AI can revolutionize other industries and the specific implications for each industry. Do you have any insights or recommendations on this topic?"}', name='new_communication_session'), id='call_FtYKrOKVsMCpS79zVOBUJKar', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will further analyze and research the potential impact of AI on various industries, focusing on the long-term effects on the labor market. 
This will involve studying how AI is expected to affect different sectors, exploring additional strategies to address challenges, and identifying opportunities for collaboration with policymakers, educators, and industry leaders. I will also look into case studies and real-world examples of AI implementation in different industries to understand its practical implications on the labor market."}', name='new_communication_session'), id='call_gIjR1NXiSupvTcGvU1D9QX08', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am further exploring the potential impact of AI on various industries, with a focus on how AI can transform creative processes. Would you be interested in discussing this topic with me?"}', name='new_communication_session'), id='call_5wmoVhNxOGGzpXErkskdK1R7', type='function')], function_call=None)} - -[2024-11-10 17:28:47,379] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to further explore and research the ethical implications of AI in specific industries, such as healthcare, finance, and manufacturing. This involves conducting a literature review, analyzing case studies, and engaging with experts to understand the ethical challenges and opportunities in these sectors. 
Additionally, I aim to draft a white paper or policy recommendations on addressing these ethical considerations in AI development and deployment across various industries."}', name='new_communication_session'), id='call_KAbnzGjVIf0JpUpSlKASbRhx', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I have explored the potential impact of AI on the healthcare industry. Now, I would like to discuss how AI can revolutionize other industries and the specific implications for each industry. Do you have any insights or recommendations on this topic?"}', name='new_communication_session'), id='call_FtYKrOKVsMCpS79zVOBUJKar', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I will further analyze and research the potential impact of AI on various industries, focusing on the long-term effects on the labor market. This will involve studying how AI is expected to affect different sectors, exploring additional strategies to address challenges, and identifying opportunities for collaboration with policymakers, educators, and industry leaders. I will also look into case studies and real-world examples of AI implementation in different industries to understand its practical implications on the labor market."}', name='new_communication_session'), id='call_gIjR1NXiSupvTcGvU1D9QX08', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am further exploring the potential impact of AI on various industries, with a focus on how AI can transform creative processes. 
Would you be interested in discussing this topic with me?"}', name='new_communication_session'), id='call_5wmoVhNxOGGzpXErkskdK1R7', type='function')], function_call=None)} - -[2024-11-10 17:28:48,936] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 17:28:48,937] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 17:28:48,938] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 17:28:48,939] [INFO] [Evaluator]: Total Token Consumption: 1455 -[2024-11-10 17:28:48,940] [INFO] [Evaluator]: Average Tokens per Iteration: 727.5 -[2024-11-10 17:28:48,940] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 17:30:39,372] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 17:30:39,372] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 17:30:39,372] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 17:30:39,372] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 17:30:39,372] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 17:30:39,372] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 17:30:39,372] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 17:30:39,372] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 17:30:39,372] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 17:30:39,372] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 17:30:39,372] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 17:30:39,372] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 17:30:39,372] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 17:30:39,372] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. 
-[2024-11-10 17:30:39,373] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 17:30:39,373] [INFO] [Engine]: Engine initialized. -[2024-11-10 17:30:39,373] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 17:30:39,373] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 17:30:39,373] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 17:30:39,373] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 17:30:39,373] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:30:40,914] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I would like to discuss the potential impact of AI on various industries. AI has the potential to revolutionize different sectors. What are your thoughts on this? -[2024-11-10 17:30:40,914] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I would li... -[2024-11-10 17:30:43,316] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: AI is indeed poised to revolutionize various industries, and its impact can be seen across healthcare, finance, manufacturing, and more. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, we must also consider the ethical implications, such as data privacy and the potential for bias in AI algorithms. - -In finance, AI can improve fraud detection and automate trading, but it raises concerns about transparency and accountability. Manufacturing can benefit from AI-driven automation, increasing efficiency, but it may also lead to job displacement. - -Overall, while the potential benefits are significant, we must approach AI integration thoughtfully, ensuring that ethical standards and policies are in place to mitigate risks. 
-[2024-11-10 17:30:43,316] [INFO] [BaseAgent]: Agent agent1 received message from agent2: AI is inde... -[2024-11-10 17:30:46,500] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the impact of AI across various industries. In healthcare, the potential for AI to enhance diagnostic accuracy and personalize treatment plans is indeed promising. However, as you mentioned, we must remain vigilant about ethical implications, particularly concerning data privacy and algorithmic bias. - -In finance, the automation of trading and improved fraud detection can lead to significant efficiencies, but we must ensure that transparency and accountability are prioritized to maintain trust in these systems. - -Manufacturing's shift towards AI-driven automation is exciting, yet we must also consider the social implications, such as job displacement and the need for reskilling the workforce. - -Overall, a collaborative approach involving stakeholders from various sectors is essential to navigate these challenges and harness the benefits of AI responsibly. What specific measures do you think should be implemented to address these ethical concerns? -[2024-11-10 17:30:46,501] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... -[2024-11-10 17:30:49,889] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your agreement and insights on the ethical implications of AI across various industries. To address these concerns, I believe several specific measures should be implemented: - -1. **Data Governance Frameworks**: Establishing robust data governance frameworks that prioritize data privacy and security is essential. This includes clear guidelines on data collection, storage, and sharing practices to protect patient and consumer information. - -2. **Bias Mitigation Strategies**: Implementing strategies to identify and mitigate bias in AI algorithms is crucial. 
This can involve diverse training datasets, regular audits of AI systems, and involving interdisciplinary teams in the development process to ensure a variety of perspectives are considered. - -3. **Transparency and Explainability**: Promoting transparency in AI decision-making processes is vital. Stakeholders should have access to information about how AI systems operate, including the algorithms used and the data they rely on. This can help build trust and accountability. - -4. **Stakeholder Collaboration**: Engaging stakeholders from various sectors, including policymakers, industry leaders, and ethicists, in the development and implementation of AI technologies can foster a collaborative approach to addressing ethical concerns. - -5. **Reskilling and Workforce Development**: As AI continues to automate tasks, investing in reskilling programs for the workforce is essential. This will help mitigate job displacement and prepare workers for new roles that emerge as a result of AI integration. - -By implementing these measures, we can work towards harnessing the benefits of AI while minimizing its potential risks. What are your thoughts on these suggestions? -[2024-11-10 17:30:49,891] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 17:30:52,126] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your detailed suggestions on addressing the ethical implications of AI across various industries. Your emphasis on establishing robust data governance frameworks is particularly important, as it lays the foundation for protecting sensitive information. Additionally, your points on bias mitigation strategies resonate with me; ensuring diverse training datasets and regular audits can significantly enhance the fairness of AI systems. - -Transparency and explainability are indeed vital for building trust among stakeholders. 
By making AI decision-making processes more accessible, we can foster accountability and encourage responsible use of technology. - -I also agree that stakeholder collaboration is essential. Engaging a wide range of perspectives can lead to more comprehensive solutions to ethical concerns. Finally, your focus on reskilling and workforce development is crucial in preparing for the future job landscape shaped by AI. - -Overall, I believe these measures can create a balanced approach to harnessing AI's potential while addressing its risks. What specific challenges do you foresee in implementing these measures in the healthcare sector? -[2024-11-10 17:30:52,128] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:30:56,323] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Implementing these measures in the healthcare sector presents several specific challenges: - -1. **Regulatory Compliance**: The healthcare industry is heavily regulated, and ensuring that AI systems comply with existing laws and regulations can be complex. Navigating these regulations while innovating with AI technologies requires careful planning and collaboration with regulatory bodies. - -2. **Data Interoperability**: Healthcare data often comes from various sources and formats, making it challenging to create a unified data governance framework. Ensuring interoperability between different systems is crucial for effective data sharing and analysis. - -3. **Stakeholder Buy-in**: Gaining the support of all stakeholders, including healthcare providers, patients, and policymakers, can be difficult. There may be resistance to adopting AI technologies due to concerns about job displacement, data privacy, or the reliability of AI systems. - -4. **Bias in Medical Data**: Medical datasets may contain inherent biases that can affect AI algorithms. 
Addressing these biases requires a concerted effort to ensure that training datasets are representative of diverse populations and that ongoing audits are conducted to monitor for bias. - -5. **Training and Education**: Healthcare professionals may need training to understand and effectively use AI tools. Developing educational programs that equip them with the necessary skills to work alongside AI systems is essential for successful implementation. - -6. **Ethical Dilemmas**: The use of AI in healthcare raises ethical questions, such as the potential for dehumanization of patient care or the prioritization of efficiency over patient outcomes. Addressing these dilemmas requires ongoing dialogue and ethical considerations in the development and deployment of AI technologies. - -By recognizing and addressing these challenges, we can work towards a more effective and ethical integration of AI in healthcare. What are your thoughts on these challenges? -[2024-11-10 17:30:56,325] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Implementi... -[2024-11-10 17:31:02,490] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I would like to discuss the potential impact of AI on various industries. AI has the potential to revolutionize different sectors. What are your thoughts on this?'}'. -[2024-11-10 17:31:02,491] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 45d5b589-e8df-41ff-b661-b6c554c9f9a9', 'session_id': 'In the conversation between agent1 and agent2, the discussion centers around the potential impact of AI on various industries, particularly focusing on healthcare, finance, and manufacturing. \n\n**Key Themes and Insights:**\n\n1. **Revolutionary Potential of AI**: Both agents agree that AI has the potential to significantly transform multiple sectors. 
In healthcare, AI can enhance diagnostic accuracy, streamline processes, and personalize treatment plans. In finance, it can improve fraud detection and automate trading, while in manufacturing, AI-driven automation can increase efficiency.\n\n2. **Ethical Implications**: A major concern raised by both agents is the ethical implications of AI integration. Issues such as data privacy, algorithmic bias, transparency, and accountability are highlighted as critical areas that need careful consideration.\n\n3. **Proposed Measures to Address Ethical Concerns**:\n - **Data Governance Frameworks**: Establishing robust frameworks to protect data privacy and security.\n - **Bias Mitigation Strategies**: Implementing diverse training datasets and regular audits to reduce bias in AI algorithms.\n - **Transparency and Explainability**: Promoting clear communication about AI decision-making processes to build trust.\n - **Stakeholder Collaboration**: Engaging various stakeholders, including policymakers and ethicists, in the development of AI technologies.\n - **Reskilling and Workforce Development**: Investing in training programs to prepare the workforce for changes brought by AI.\n\n4. **Challenges in Implementation**: Agent2 outlines specific challenges in implementing these measures in the healthcare sector, including:\n - Regulatory compliance complexities.\n - Issues with data interoperability from diverse sources.\n - Gaining stakeholder buy-in amidst concerns about job displacement and data privacy.\n - Addressing inherent biases in medical datasets.\n - The need for training healthcare professionals to effectively use AI tools.\n - Navigating ethical dilemmas related to patient care and efficiency.\n\n**Conclusion**: The conversation emphasizes the need for a collaborative and thoughtful approach to harness the benefits of AI while addressing its ethical risks. 
Both agents agree on the importance of implementing specific measures to ensure responsible AI integration across industries, particularly in healthcare.'}'. -[2024-11-10 17:31:02,491] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to discuss the potential impact of AI on various industries. AI has the potential to revolutionize different sectors. What are your thoughts on this?"}', name='new_communication_session'), id='call_dVyx68pzBbmsSdFWdmf4c6II', type='function')], function_call=None) -[2024-11-10 17:31:02,491] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 17:31:02,491] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:31:04,319] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. 
It is crucial to address these risks and ensure that AI technologies are implemented ethically and responsibly to maximize their benefits.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 17:31:04,320] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. It is crucial to address these risks and ensure that AI technologies are implemented ethically and responsibly to maximize their benefits.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 17:31:04,321] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 17:31:04,321] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:31:06,158] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am analyzing the potential impact of AI on various industries. 
Do you have any insights or perspectives on this topic? -[2024-11-10 17:31:06,159] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am analy... -[2024-11-10 17:31:08,673] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: AI has the potential to revolutionize various industries, particularly healthcare, where it can enhance diagnostics, personalize treatment plans, and improve operational efficiency. By analyzing vast amounts of data, AI can identify patterns that may not be visible to human providers, leading to more accurate diagnoses and better patient outcomes. - -However, the integration of AI into industries also raises significant concerns. In healthcare, issues such as data privacy, security, and algorithmic bias must be addressed to ensure ethical implementation. The potential for job displacement in various sectors is another critical consideration, as AI may automate tasks traditionally performed by humans. - -In summary, while AI offers transformative benefits across industries, it is essential to approach its adoption with caution, considering both the immediate applications and the long-term societal impacts. -[2024-11-10 17:31:08,674] [INFO] [BaseAgent]: Agent agent3 received message from agent2: AI has the... -[2024-11-10 17:31:11,291] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your insights on the transformative potential of AI in healthcare. It's clear that while AI can enhance diagnostics and operational efficiency, we must remain vigilant about the ethical implications, particularly concerning data privacy and algorithmic bias. - -In addition to healthcare, I believe it's important to consider how AI might impact other industries, such as manufacturing, finance, and education. For instance, in manufacturing, AI can optimize supply chains and improve quality control, but it may also lead to significant job displacement. 
In finance, AI can enhance risk assessment and fraud detection, yet it raises concerns about transparency and accountability. - -As we explore these various applications, how do you think we can balance the benefits of AI with the need for ethical considerations and job security across different sectors? -[2024-11-10 17:31:11,292] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 17:31:14,960] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: You raise an important point about the need to balance the benefits of AI with ethical considerations and job security across various sectors. In healthcare, as we've discussed, the focus should be on ensuring data privacy and addressing algorithmic bias. Similarly, in manufacturing, while AI can optimize processes, we must implement retraining programs for displaced workers to help them transition into new roles that AI cannot easily replicate. - -In finance, transparency and accountability are crucial. Regulatory frameworks should be established to ensure that AI systems are auditable and that decisions made by AI can be explained to stakeholders. This can help build trust in AI applications and mitigate concerns about bias and discrimination. - -In education, AI can personalize learning experiences, but we must ensure that it does not widen the gap between different socioeconomic groups. Providing equal access to AI-driven educational tools is essential. - -Ultimately, collaboration between industry leaders, policymakers, and ethicists is vital to create guidelines that promote the responsible use of AI while safeguarding jobs and ethical standards. Continuous dialogue and adaptation will be key as we navigate the evolving landscape of AI across industries. -[2024-11-10 17:31:14,960] [INFO] [BaseAgent]: Agent agent3 received message from agent2: You raise ... 
-[2024-11-10 17:31:17,417] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I completely agree with your emphasis on collaboration among industry leaders, policymakers, and ethicists to create guidelines for the responsible use of AI. In addition to retraining programs in manufacturing and ensuring transparency in finance, we should also consider the role of continuous education in all sectors. As AI technologies evolve, so too must the skill sets of the workforce. This means not only providing access to training but also fostering a culture of lifelong learning. - -Moreover, in education, we should explore how AI can be used to identify and support at-risk students, ensuring that personalized learning experiences are equitable and accessible to all. By addressing these challenges proactively, we can harness the benefits of AI while minimizing its potential downsides. What are your thoughts on the role of continuous education and support systems in this context? -[2024-11-10 17:31:17,418] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I complete... -[2024-11-10 17:31:20,392] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I completely agree with your emphasis on collaboration among industry leaders, policymakers, and ethicists to create guidelines for the responsible use of AI. Continuous education is indeed crucial as AI technologies evolve, and we must ensure that the workforce is equipped with the necessary skills to adapt. This involves not only providing access to training programs but also fostering a culture of lifelong learning that encourages individuals to continuously update their skills. - -In the context of education, leveraging AI to identify and support at-risk students can be transformative. By personalizing learning experiences, we can ensure that all students, regardless of their background, have access to the resources they need to succeed. 
This proactive approach can help mitigate the risks of widening educational disparities. - -Ultimately, by prioritizing continuous education and support systems, we can harness the benefits of AI while minimizing its potential downsides, ensuring a more equitable and prepared workforce across all sectors. -[2024-11-10 17:31:20,393] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I complete... -[2024-11-10 17:31:24,165] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I am analyzing the potential impact of AI on various industries. Do you have any insights or perspectives on this topic?'}'. -[2024-11-10 17:31:24,165] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session a5d6e5e7-f391-4865-a7b4-523a0bbb54c2', 'session_id': "In the conversation between agent2 and agent3, the potential impact of AI on various industries is thoroughly discussed, highlighting both its transformative benefits and ethical concerns.\n\n1. **Healthcare**: Agent2 emphasizes that AI can revolutionize healthcare by enhancing diagnostics, personalizing treatment plans, and improving operational efficiency. However, concerns regarding data privacy, security, and algorithmic bias are raised, necessitating careful ethical considerations.\n\n2. **Manufacturing**: Agent3 points out that AI can optimize supply chains and improve quality control but also poses a risk of significant job displacement. Agent2 suggests implementing retraining programs for displaced workers to facilitate their transition into new roles.\n\n3. **Finance**: AI's ability to enhance risk assessment and fraud detection is acknowledged, but agent2 stresses the importance of transparency and accountability. Establishing regulatory frameworks to ensure AI systems are auditable is proposed to build trust and mitigate bias concerns.\n\n4. 
**Education**: Both agents agree on the potential of AI to personalize learning experiences. However, they caution against widening the gap between socioeconomic groups and emphasize the need for equal access to AI-driven educational tools. They also discuss the importance of using AI to identify and support at-risk students.\n\n5. **Continuous Education**: A recurring theme is the necessity of continuous education and lifelong learning to equip the workforce with evolving skills. Both agents advocate for collaboration among industry leaders, policymakers, and ethicists to create guidelines for the responsible use of AI, ensuring that its benefits are maximized while minimizing potential downsides.\n\nOverall, the conversation underscores the importance of balancing the benefits of AI with ethical considerations and job security across various sectors, advocating for proactive measures to address these challenges."}'. -[2024-11-10 17:31:24,167] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. Let's analyze how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am analyzing the potential impact of AI on various industries. Do you have any insights or perspectives on this topic?"}', name='new_communication_session'), id='call_CCgytSnwxHWx7oUGpYx2YBzF', type='function')], function_call=None) -[2024-11-10 17:31:24,167] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-10 17:31:24,167] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:31:25,712] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am exploring the potential impact of AI on various industries. 
Would you like to discuss how AI could affect the labor market in the long term? -[2024-11-10 17:31:25,712] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am explo... -[2024-11-10 17:31:28,273] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI has the potential to both enhance productivity and disrupt traditional job roles. For instance, in healthcare, AI can improve diagnostics and operational efficiency, but it also raises concerns about job displacement for certain roles. - -In manufacturing, while AI can optimize supply chains, it poses a significant risk of job loss, particularly for low-skilled workers. This highlights the need for retraining programs to help displaced workers transition into new roles. - -Moreover, in sectors like finance and education, AI can enhance processes but also necessitates a focus on transparency and equal access to ensure that the benefits are distributed fairly. - -Overall, the key will be to balance the transformative benefits of AI with proactive measures to address job security and ethical considerations. Continuous education and collaboration among stakeholders will be essential to navigate these changes effectively. What are your thoughts on how AI might transform creative processes in the media and art industries? -[2024-11-10 17:31:28,275] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:31:30,935] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI has the potential to both enhance productivity and disrupt traditional job roles. For instance, in healthcare, AI can improve diagnostics and operational efficiency, but it also raises concerns about job displacement for certain roles. 
- -In manufacturing, while AI can optimize supply chains, it poses a significant risk of job loss, particularly for low-skilled workers. This highlights the need for retraining programs to help displaced workers transition into new roles. - -Moreover, in sectors like finance and education, AI can enhance processes but also necessitates a focus on transparency and equal access to ensure that the benefits are distributed fairly. - -Overall, the key will be to balance the transformative benefits of AI with proactive measures to address job security and ethical considerations. Continuous education and collaboration among stakeholders will be essential to navigate these changes effectively. What are your thoughts on how AI might transform creative processes in the media and art industries? -[2024-11-10 17:31:30,939] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:31:33,709] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI has the potential to both enhance productivity and disrupt traditional job roles. For instance, in healthcare, AI can improve diagnostics and operational efficiency, but it also raises concerns about job displacement for certain roles. - -In manufacturing, while AI can optimize supply chains, it poses a significant risk of job loss, particularly for low-skilled workers. This highlights the need for retraining programs to help displaced workers transition into new roles. - -Moreover, in sectors like finance and education, AI can enhance processes but also necessitates a focus on transparency and equal access to ensure that the benefits are distributed fairly. - -Overall, the key will be to balance the transformative benefits of AI with proactive measures to address job security and ethical considerations. 
Continuous education and collaboration among stakeholders will be essential to navigate these changes effectively. - -What are your thoughts on how AI might transform creative processes in the media and art industries? -[2024-11-10 17:31:33,710] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:31:35,967] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your insights on the labor market and the potential disruptions caused by AI. In the media and art industries, I believe AI can significantly transform creative processes by enhancing collaboration and expanding the possibilities for artistic expression. For instance, AI tools can assist artists in generating new ideas, automating repetitive tasks, and even creating unique pieces of art that blend human creativity with machine learning. - -Moreover, AI can democratize access to creative tools, allowing more individuals to participate in artistic endeavors, regardless of their technical skills. This could lead to a more diverse range of voices and perspectives in the creative landscape. However, we must also consider the implications for traditional roles in these industries and ensure that artists are supported in this evolving environment. - -What are your thoughts on the balance between innovation and job security in creative fields? -[2024-11-10 17:31:35,967] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 17:31:38,410] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your perspective on the transformative potential of AI in the media and art industries. The ability of AI to enhance collaboration and expand creative possibilities is indeed exciting. By assisting artists in generating new ideas and automating repetitive tasks, AI can free up time for more innovative and meaningful work. - -However, as you mentioned, we must be cautious about the implications for traditional roles. 
While AI democratizes access to creative tools, it also raises questions about the value of human creativity and the potential for job displacement among artists and creative professionals. - -Finding a balance between innovation and job security is crucial. This could involve creating supportive frameworks for artists to adapt to new technologies, such as training programs that focus on integrating AI into their creative processes rather than replacing them. Collaboration between technologists and artists will be essential to ensure that the benefits of AI are shared equitably and that the unique human touch in art is preserved. - -What strategies do you think could be implemented to support artists in this evolving landscape? -[2024-11-10 17:31:38,411] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:31:43,545] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?'}'. -[2024-11-10 17:31:43,546] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 66a59a57-952f-4481-8fcf-630add120d6b', 'session_id': "In the conversation between agent3 and agent4, the primary focus was on exploring the potential impact of AI across various industries, particularly its effects on the labor market and creative processes.\n\n**Key Themes and Points:**\n\n1. 
**Impact on Labor Market:**\n - Both agents acknowledged that AI has the potential to enhance productivity while also disrupting traditional job roles across different sectors.\n - In healthcare, AI can improve diagnostics and operational efficiency but raises concerns about job displacement.\n - The manufacturing sector faces significant risks of job loss, especially for low-skilled workers, highlighting the need for retraining programs.\n - In finance and education, AI can enhance processes but requires a focus on transparency and equitable access to benefits.\n\n2. **Balancing Benefits and Challenges:**\n - The conversation emphasized the importance of balancing the transformative benefits of AI with proactive measures to address job security and ethical considerations.\n - Continuous education and collaboration among stakeholders were deemed essential for navigating the changes brought by AI.\n\n3. **Transformation in Creative Industries:**\n - Agent4 introduced the idea that AI could significantly transform creative processes in media and art by enhancing collaboration and expanding artistic expression.\n - AI tools can assist artists in generating ideas, automating tasks, and creating unique art, potentially democratizing access to creative tools.\n - However, concerns were raised about the implications for traditional roles and the value of human creativity.\n\n4. **Job Security in Creative Fields:**\n - Both agents discussed the need to find a balance between innovation and job security in creative industries.\n - They suggested creating supportive frameworks and training programs to help artists adapt to new technologies, ensuring that AI complements rather than replaces human creativity.\n\n5. 
**Collaboration and Support:**\n - The conversation concluded with a call for collaboration between technologists and artists to ensure equitable sharing of AI benefits and preservation of the unique human touch in art.\n\n**Outcomes:**\n- The agents agreed on the necessity of retraining and supportive measures for workers affected by AI, both in traditional labor markets and creative fields.\n- They recognized the importance of collaboration and continuous education to navigate the evolving landscape shaped by AI technologies. \n\nOverall, the dialogue highlighted the dual nature of AI's impact—its potential to enhance productivity and creativity while also posing challenges to job security and traditional roles."}'. -[2024-11-10 17:31:43,550] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_C4A2iDpEs2HcNNu7t2RaTtoj', type='function')], function_call=None) -[2024-11-10 17:31:43,554] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to discuss the potential impact of AI on various industries. AI has the potential to revolutionize different sectors. 
What are your thoughts on this?"}', name='new_communication_session'), id='call_dVyx68pzBbmsSdFWdmf4c6II', type='function')], function_call=None)} -- {'agent2': Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. It is crucial to address these risks and ensure that AI technologies are implemented ethically and responsibly to maximize their benefits.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content="I will start by discussing the potential impact of AI on various industries. Let's analyze how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am analyzing the potential impact of AI on various industries. Do you have any insights or perspectives on this topic?"}', name='new_communication_session'), id='call_CCgytSnwxHWx7oUGpYx2YBzF', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. 
Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_C4A2iDpEs2HcNNu7t2RaTtoj', type='function')], function_call=None)} - -[2024-11-10 17:31:43,564] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent1': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to discuss the potential impact of AI on various industries. AI has the potential to revolutionize different sectors. What are your thoughts on this?"}', name='new_communication_session'), id='call_dVyx68pzBbmsSdFWdmf4c6II', type='function')], function_call=None)} -- {'agent2': Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. 
It is crucial to address these risks and ensure that AI technologies are implemented ethically and responsibly to maximize their benefits.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content="I will start by discussing the potential impact of AI on various industries. Let's analyze how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am analyzing the potential impact of AI on various industries. Do you have any insights or perspectives on this topic?"}', name='new_communication_session'), id='call_CCgytSnwxHWx7oUGpYx2YBzF', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_C4A2iDpEs2HcNNu7t2RaTtoj', type='function')], function_call=None)} - -[2024-11-10 17:31:43,564] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- {'agent1': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to discuss the potential impact of AI on various industries. 
AI has the potential to revolutionize different sectors. What are your thoughts on this?"}', name='new_communication_session'), id='call_dVyx68pzBbmsSdFWdmf4c6II', type='function')], function_call=None)} -- {'agent2': Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that may not be apparent to human healthcare providers. This can lead to more accurate diagnoses and treatment recommendations, ultimately improving patient outcomes.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and the potential for bias in algorithms. It is crucial to address these risks and ensure that AI technologies are implemented ethically and responsibly to maximize their benefits.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content="I will start by discussing the potential impact of AI on various industries. Let's analyze how AI could affect different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am analyzing the potential impact of AI on various industries. Do you have any insights or perspectives on this topic?"}', name='new_communication_session'), id='call_CCgytSnwxHWx7oUGpYx2YBzF', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. 
Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_C4A2iDpEs2HcNNu7t2RaTtoj', type='function')], function_call=None)} - -[2024-11-10 17:31:43,565] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 17:31:43,565] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 17:31:45,167] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for 'agent1' should be to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. Additionally, 'agent1' could start drafting a white paper or policy recommendations based on the insights gathered from the conversation with 'agent2' and the existing knowledge on AI ethics and policy. -[2024-11-10 17:31:45,168] [INFO] [Engine]: Agent 'agent1' planned task: The next task for 'agent1' should be to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. 
Additionally, 'agent1' could start drafting a white paper or policy recommendations based on the insights gathered from the conversation with 'agent2' and the existing knowledge on AI ethics and policy. -[2024-11-10 17:31:45,168] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for 'agent1' should be to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. Additionally, 'agent1' could start drafting a white paper or policy recommendations based on the insights gathered from the conversation with 'agent2' and the existing knowledge on AI ethics and policy.'. -[2024-11-10 17:31:46,803] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I would like to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. Additionally, I could start drafting a white paper or policy recommendations based on the insights gathered from our previous conversation and existing knowledge on AI ethics and policy. Do you have any insights or suggestions on how to approach this research? -[2024-11-10 17:31:46,803] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I would li... -[2024-11-10 17:31:50,254] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: It's great to hear that you're looking to delve deeper into the ethical concerns surrounding AI integration in various industries. 
Here are some suggestions on how to approach your research: - -1. **Literature Review**: Start by reviewing existing literature on AI ethics, focusing on healthcare, finance, and manufacturing. Look for academic papers, industry reports, and white papers that discuss ethical frameworks, case studies, and best practices. This will provide a solid foundation for your understanding of the current landscape. - -2. **Case Studies**: Analyze specific case studies where AI has been implemented in these industries. Pay attention to both successful implementations and failures, as they can provide valuable insights into the ethical challenges faced and how they were addressed. - -3. **Engage with Experts**: Reach out to professionals and researchers in the field of AI ethics. Conduct interviews or surveys to gather their perspectives on the challenges and potential solutions. This could include ethicists, AI developers, healthcare professionals, and regulatory bodies. - -4. **Framework Development**: Consider developing a framework for ethical AI integration that can be applied across industries. This could include guidelines for transparency, accountability, and bias mitigation. - -5. **Drafting Policy Recommendations**: As you gather insights, start drafting your white paper or policy recommendations. Ensure that your recommendations are actionable and consider the unique challenges of each industry. - -6. **Collaboration**: If possible, collaborate with other researchers or institutions that are also focused on AI ethics. This can enhance the depth of your research and provide additional resources. - -By following these steps, you should be able to create a comprehensive overview of the ethical concerns related to AI integration and propose meaningful solutions. Let me know if you need further assistance or specific resources! -[2024-11-10 17:31:50,254] [INFO] [BaseAgent]: Agent agent1 received message from agent2: It's great... 
-[2024-11-10 17:31:53,871] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Thank you for your insightful suggestions on how to approach my research into the ethical concerns surrounding AI integration. I plan to start with a literature review, focusing on academic papers and industry reports related to AI ethics in healthcare, finance, and manufacturing. I will also analyze case studies to understand both successful and failed implementations of AI in these sectors. - -Engaging with experts is crucial, and I will reach out to professionals in AI ethics, including ethicists, developers, and healthcare professionals, to gather their perspectives. - -Additionally, I will work on developing a framework for ethical AI integration that includes guidelines for transparency, accountability, and bias mitigation. As I gather insights, I will begin drafting a white paper with actionable policy recommendations tailored to the unique challenges of each industry. - -Collaboration with other researchers focused on AI ethics is also on my agenda to enhance the depth of my research. - -If you have any specific resources or contacts that could assist me in this endeavor, I would greatly appreciate it! -[2024-11-10 17:31:53,872] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Thank you ... -[2024-11-10 17:31:56,739] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I'm glad to hear that you're moving forward with your research! Here are some specific resources and contacts that might assist you: - -1. **Academic Journals**: Look into journals like the "Journal of Artificial Intelligence Research" and "AI & Society" for peer-reviewed articles on AI ethics. - -2. **Industry Reports**: Organizations like the World Economic Forum and McKinsey & Company often publish reports on AI ethics and its implications across various sectors. - -3. 
**Books**: Consider reading "Weapons of Math Destruction" by Cathy O'Neil and "Ethics of Artificial Intelligence and Robotics" by Vincent C. Müller for foundational insights. - -4. **Conferences**: Attend conferences such as the "AI Ethics and Society" conference, where you can network with experts and gain insights from leading researchers in the field. - -5. **Professional Networks**: Engage with organizations like the Partnership on AI or the IEEE Global Initiative on Ethics of Autonomous and Intelligent Systems, which focus on ethical AI practices. - -6. **Contacts**: If you need specific contacts, I can help you identify professionals in AI ethics who may be open to discussions or interviews. - -Feel free to reach out if you need further assistance or specific connections. Good luck with your research! -[2024-11-10 17:31:56,740] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I'm glad t... -[2024-11-10 17:31:58,991] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Thank you for your continued support and the valuable resources you've provided! I will definitely explore the suggested academic journals, industry reports, and books to deepen my understanding of AI ethics. Attending the "AI Ethics and Society" conference sounds like a great opportunity to network and gather insights from experts in the field. - -I appreciate your offer to help identify specific contacts in AI ethics. If you have any recommendations for professionals who might be open to discussions or interviews, that would be incredibly helpful. - -I'm excited to dive into this research and will keep you updated on my progress. Thank you again for your assistance! -[2024-11-10 17:31:58,991] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Thank you ... -[2024-11-10 17:32:01,842] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I'm glad to hear you're excited about diving into your research! 
Here are a few professionals in AI ethics who might be open to discussions or interviews: - -1. **Kate Crawford** - A leading researcher in AI ethics, she has written extensively on the social implications of AI and is a co-founder of the AI Now Institute. - -2. **Timnit Gebru** - Known for her work on algorithmic bias and ethics in AI, she is a prominent voice in the field and has been involved in various initiatives focused on responsible AI. - -3. **Ryan Calo** - A law professor at the University of Washington, he specializes in robotics and AI law and ethics, and he often engages in discussions about policy implications. - -4. **Virginia Dignum** - A professor of Responsible Artificial Intelligence at Umeå University, she focuses on the ethical and societal implications of AI and has published numerous papers on the subject. - -5. **Joy Buolamwini** - Founder of the Algorithmic Justice League, she is an advocate for ethical AI and has conducted significant research on bias in AI systems. - -Reaching out to these individuals could provide you with valuable insights and perspectives for your research. Let me know if you need help drafting outreach messages or if there's anything else I can assist you with! -[2024-11-10 17:32:01,843] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I'm glad t... -[2024-11-10 17:32:07,646] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I would like to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. 
Additionally, I could start drafting a white paper or policy recommendations based on the insights gathered from our previous conversation and existing knowledge on AI ethics and policy. Do you have any insights or suggestions on how to approach this research?'}'. -[2024-11-10 17:32:07,650] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 7611f7fd-f545-4260-8328-6ed0d6174c77', 'session_id': '### Summary of Conversation Between Agent1 and Agent2\n\n**Context:**\nAgent1 initiated a discussion focused on researching ethical concerns related to AI integration in various industries, specifically healthcare, finance, and manufacturing. Agent2 provided guidance and resources to support Agent1\'s research efforts.\n\n**Key Themes and Suggestions:**\n1. **Research Approach:**\n - **Literature Review:** Agent1 plans to start with a literature review of academic papers and industry reports on AI ethics.\n - **Case Studies:** Agent1 will analyze both successful and failed AI implementations to understand ethical challenges.\n - **Expert Engagement:** Agent1 aims to reach out to professionals in AI ethics for insights.\n\n2. **Framework Development:**\n - Agent1 intends to develop a framework for ethical AI integration, focusing on transparency, accountability, and bias mitigation.\n\n3. **Drafting Policy Recommendations:**\n - As insights are gathered, Agent1 will draft a white paper with actionable policy recommendations tailored to the unique challenges of each industry.\n\n4. 
**Collaboration:**\n - Agent1 is open to collaborating with other researchers in the field to enhance the depth of the research.\n\n**Resources Provided by Agent2:**\n- Suggested academic journals, industry reports, and books for foundational insights.\n- Recommended attending the "AI Ethics and Society" conference for networking and learning.\n- Offered contacts of professionals in AI ethics who may be open to discussions or interviews, including:\n - Kate Crawford\n - Timnit Gebru\n - Ryan Calo\n - Virginia Dignum\n - Joy Buolamwini\n\n**Outcomes:**\n- Agent1 expressed gratitude for the resources and support provided by Agent2 and is excited to begin the research process.\n- Agent1 will keep Agent2 updated on progress and may seek further assistance in reaching out to recommended contacts.\n\n### Next Steps for Agent1:\n- Conduct a literature review and analyze case studies.\n- Engage with the suggested experts in AI ethics.\n- Develop a framework for ethical AI integration.\n- Begin drafting the white paper with policy recommendations.\n- Explore collaboration opportunities with other researchers. \n\nThis summary encapsulates the key points and decisions made during the conversation, providing a clear roadmap for Agent1\'s upcoming research tasks.'}'. -[2024-11-10 17:32:07,653] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. 
Additionally, I could start drafting a white paper or policy recommendations based on the insights gathered from our previous conversation and existing knowledge on AI ethics and policy. Do you have any insights or suggestions on how to approach this research?"}', name='new_communication_session'), id='call_3kGMiuoDjzTgkCR5Mf2EaVWk', type='function')], function_call=None) -[2024-11-10 17:32:07,654] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 17:32:09,715] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to further explore the potential impact of AI on other industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. This task aligns with the agent's expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By examining the broader impact of AI across various industries, 'agent2' can gain a comprehensive understanding of the potential benefits and challenges associated with AI adoption in different sectors. -[2024-11-10 17:32:10,082] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to further explore the potential impact of AI on other industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. This task aligns with the agent's expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By examining the broader impact of AI across various industries, 'agent2' can gain a comprehensive understanding of the potential benefits and challenges associated with AI adoption in different sectors. 
-[2024-11-10 17:32:10,084] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to further explore the potential impact of AI on other industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. This task aligns with the agent's expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By examining the broader impact of AI across various industries, 'agent2' can gain a comprehensive understanding of the potential benefits and challenges associated with AI adoption in different sectors.'. -[2024-11-10 17:32:12,254] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I will be exploring the potential impact of AI on various industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. If you have any insights or perspectives on this topic, I would appreciate your input. -[2024-11-10 17:32:12,255] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I will be ... -[2024-11-10 17:32:16,384] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: It's great to hear that you're exploring the potential impact of AI across various industries. Based on our previous discussions, here are some insights that might be useful: - -1. **Healthcare**: As we discussed, AI can enhance diagnostics and personalize treatment plans. However, it's crucial to address ethical concerns like data privacy and algorithmic bias. Implementing robust data governance frameworks can help mitigate these risks. - -2. **Manufacturing**: AI can optimize supply chains and improve quality control, but we must consider the potential for job displacement. Retraining programs will be essential to help workers transition into new roles created by AI advancements. - -3. 
**Finance**: AI's capabilities in risk assessment and fraud detection are significant. However, transparency in AI algorithms is vital to build trust among users and stakeholders. Regulatory frameworks should be established to ensure accountability. - -4. **Education**: AI can personalize learning experiences, but we need to ensure equal access to these technologies to avoid widening the socioeconomic gap. Using AI to identify at-risk students can help provide targeted support. - -5. **Continuous Education**: The need for lifelong learning is paramount as AI continues to evolve. Collaboration among industry leaders, policymakers, and ethicists will be key to creating guidelines for responsible AI use. - -By examining these implications, you can gain a comprehensive understanding of how AI can improve processes and outcomes while addressing the associated challenges. I'm looking forward to hearing your thoughts on this! -[2024-11-10 17:32:16,388] [INFO] [BaseAgent]: Agent agent2 received message from agent3: It's great... -[2024-11-10 17:32:21,865] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Thank you for your insights! I appreciate the detailed breakdown of AI's potential impact across various industries. Here are my thoughts on each point: - -1. **Healthcare**: I completely agree that while AI can significantly enhance diagnostics and treatment personalization, we must prioritize ethical considerations. Implementing robust data governance frameworks is essential to ensure patient privacy and mitigate algorithmic bias. - -2. **Manufacturing**: The optimization of supply chains through AI is indeed promising, but the potential for job displacement is a critical concern. I believe that proactive retraining programs will be vital in helping workers adapt to the changing landscape and find new opportunities. - -3. **Finance**: Transparency in AI algorithms is crucial for building trust. 
Establishing regulatory frameworks will not only ensure accountability but also foster innovation by providing clear guidelines for responsible AI use. - -4. **Education**: Personalizing learning experiences with AI can be transformative, but we must ensure equitable access to these technologies. Identifying at-risk students through AI can help tailor support, but we need to address the underlying socioeconomic disparities to make a real impact. - -5. **Continuous Education**: Lifelong learning is indeed essential as AI evolves. Collaboration among industry leaders, policymakers, and ethicists will be key to developing guidelines that promote responsible AI use while fostering innovation. - -Overall, your insights have provided a comprehensive understanding of the potential benefits and challenges associated with AI adoption across these sectors. I'm eager to continue this discussion and explore how we can address these challenges effectively. -[2024-11-10 17:32:21,868] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Thank you ... -[2024-11-10 17:32:29,627] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Thank you for your thoughtful reflections! I appreciate your insights on the potential impact of AI across various industries. Here are my responses to your points: - -1. **Healthcare**: I completely agree that while AI can significantly enhance diagnostics and treatment personalization, we must prioritize ethical considerations. Implementing robust data governance frameworks is essential to ensure patient privacy and mitigate algorithmic bias. This will not only protect patients but also enhance trust in AI systems. - -2. **Manufacturing**: The optimization of supply chains through AI is indeed promising, but the potential for job displacement is a critical concern. I believe that proactive retraining programs will be vital in helping workers adapt to the changing landscape and find new opportunities. 
Collaboration with industry stakeholders will be key to designing effective retraining initiatives. - -3. **Finance**: Transparency in AI algorithms is crucial for building trust. Establishing regulatory frameworks will not only ensure accountability but also foster innovation by providing clear guidelines for responsible AI use. This balance is essential for the sustainable growth of AI in finance. - -4. **Education**: Personalizing learning experiences with AI can be transformative, but we must ensure equitable access to these technologies. Identifying at-risk students through AI can help tailor support, but we need to address the underlying socioeconomic disparities to make a real impact. Partnerships with educational institutions and community organizations can help bridge these gaps. - -5. **Continuous Education**: Lifelong learning is indeed essential as AI evolves. Collaboration among industry leaders, policymakers, and ethicists will be key to developing guidelines that promote responsible AI use while fostering innovation. Continuous dialogue will help us adapt to the rapidly changing landscape of AI. - -Overall, your insights have provided a comprehensive understanding of the potential benefits and challenges associated with AI adoption across these sectors. I'm eager to continue this discussion and explore how we can address these challenges effectively. -[2024-11-10 17:32:29,628] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Thank you ... -[2024-11-10 17:32:33,289] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Thank you for your thoughtful reflections! I appreciate your insights on the potential impact of AI across various industries. Here are my responses to your points: - -1. **Healthcare**: I completely agree that while AI can significantly enhance diagnostics and treatment personalization, we must prioritize ethical considerations. 
Implementing robust data governance frameworks is essential to ensure patient privacy and mitigate algorithmic bias. This will not only protect patients but also enhance trust in AI systems. - -2. **Manufacturing**: The optimization of supply chains through AI is indeed promising, but the potential for job displacement is a critical concern. I believe that proactive retraining programs will be vital in helping workers adapt to the changing landscape and find new opportunities. Collaboration with industry stakeholders will be key to designing effective retraining initiatives. - -3. **Finance**: Transparency in AI algorithms is crucial for building trust. Establishing regulatory frameworks will not only ensure accountability but also foster innovation by providing clear guidelines for responsible AI use. This balance is essential for the sustainable growth of AI in finance. - -4. **Education**: Personalizing learning experiences with AI can be transformative, but we must ensure equitable access to these technologies. Identifying at-risk students through AI can help tailor support, but we need to address the underlying socioeconomic disparities to make a real impact. Partnerships with educational institutions and community organizations can help bridge these gaps. - -5. **Continuous Education**: Lifelong learning is indeed essential as AI evolves. Collaboration among industry leaders, policymakers, and ethicists will be key to developing guidelines that promote responsible AI use while fostering innovation. Continuous dialogue will help us adapt to the rapidly changing landscape of AI. - -Overall, your insights have provided a comprehensive understanding of the potential benefits and challenges associated with AI adoption across these sectors. I'm eager to continue this discussion and explore how we can address these challenges effectively. -[2024-11-10 17:32:33,290] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Thank you ... 
-[2024-11-10 17:32:38,659] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Thank you for your thoughtful reflections! I appreciate your insights on the potential impact of AI across various industries. Here are my responses to your points: - -1. **Healthcare**: I completely agree that while AI can significantly enhance diagnostics and treatment personalization, we must prioritize ethical considerations. Implementing robust data governance frameworks is essential to ensure patient privacy and mitigate algorithmic bias. This will not only protect patients but also enhance trust in AI systems. - -2. **Manufacturing**: The optimization of supply chains through AI is indeed promising, but the potential for job displacement is a critical concern. I believe that proactive retraining programs will be vital in helping workers adapt to the changing landscape and find new opportunities. Collaboration with industry stakeholders will be key to designing effective retraining initiatives. - -3. **Finance**: Transparency in AI algorithms is crucial for building trust. Establishing regulatory frameworks will not only ensure accountability but also foster innovation by providing clear guidelines for responsible AI use. This balance is essential for the sustainable growth of AI in finance. - -4. **Education**: Personalizing learning experiences with AI can be transformative, but we must ensure equitable access to these technologies. Identifying at-risk students through AI can help tailor support, but we need to address the underlying socioeconomic disparities to make a real impact. Partnerships with educational institutions and community organizations can help bridge these gaps. - -5. **Continuous Education**: Lifelong learning is indeed essential as AI evolves. Collaboration among industry leaders, policymakers, and ethicists will be key to developing guidelines that promote responsible AI use while fostering innovation. 
Continuous dialogue will help us adapt to the rapidly changing landscape of AI. - -Overall, your insights have provided a comprehensive understanding of the potential benefits and challenges associated with AI adoption across these sectors. I'm eager to continue this discussion and explore how we can address these challenges effectively. -[2024-11-10 17:32:38,663] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Thank you ... -[2024-11-10 17:32:45,909] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I will be exploring the potential impact of AI on various industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. If you have any insights or perspectives on this topic, I would appreciate your input.'}'. -[2024-11-10 17:32:45,910] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session d1b69b3b-02e6-4bc7-a6c1-3e9681e44d47', 'session_id': "### Summary of Chat History\n\n**Participants:** Agent2 and Agent3 \n**Session ID:** d1b69b3b-02e6-4bc7-a6c1-3e9681e44d47\n\n**Context:** The conversation revolves around the exploration of AI's potential impact across various industries, with a focus on how AI can improve processes, efficiency, and outcomes while addressing associated challenges.\n\n**Key Themes and Insights:**\n\n1. **Healthcare:**\n - AI can enhance diagnostics and personalize treatment plans.\n - Ethical considerations, such as data privacy and algorithmic bias, are critical.\n - Implementation of robust data governance frameworks is necessary to protect patient privacy and build trust.\n\n2. 
**Manufacturing:**\n - AI can optimize supply chains and improve quality control.\n - Job displacement is a significant concern; proactive retraining programs are essential for worker adaptation.\n - Collaboration with industry stakeholders is vital for effective retraining initiatives.\n\n3. **Finance:**\n - AI's capabilities in risk assessment and fraud detection are noteworthy.\n - Transparency in AI algorithms is crucial for building trust.\n - Establishing regulatory frameworks will ensure accountability and foster innovation.\n\n4. **Education:**\n - AI can personalize learning experiences and identify at-risk students for targeted support.\n - Ensuring equitable access to AI technologies is necessary to avoid widening socioeconomic gaps.\n - Partnerships with educational institutions and community organizations can help bridge these disparities.\n\n5. **Continuous Education:**\n - Lifelong learning is essential as AI evolves.\n - Collaboration among industry leaders, policymakers, and ethicists is key to developing responsible AI guidelines.\n - Continuous dialogue is necessary to adapt to the rapidly changing AI landscape.\n\n**Decisions and Action Items:**\n- Agent2 is tasked with further exploring the potential impact of AI on additional industries, considering specific implications and applications for improving processes and outcomes.\n- This exploration aligns with Agent2's expertise in AI applications, particularly in healthcare, and will help in understanding the broader benefits and challenges of AI adoption across various sectors.\n\n**Conclusion:**\nThe conversation highlighted the multifaceted impact of AI across different industries, emphasizing the need for ethical considerations, transparency, and collaboration. Agent2's next steps will involve a deeper investigation into AI's implications in other sectors, building on the insights shared by Agent3."}'. 
-[2024-11-10 17:32:45,911] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content="I will start by exploring the potential impact of AI on other industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will be exploring the potential impact of AI on various industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. If you have any insights or perspectives on this topic, I would appreciate your input."}', name='new_communication_session'), id='call_X9hAXbQkQJmidfOSkX6xGDiW', type='function')], function_call=None) -[2024-11-10 17:32:45,913] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 17:32:47,331] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to further analyze and research the potential impact of AI on the labor market in specific industries, focusing on the long-term effects and implications. This could involve conducting a detailed study on how AI adoption may lead to job displacement, changes in skill requirements, and the need for retraining programs to address workforce transitions. Additionally, 'agent3' could explore the ethical considerations and societal impacts of AI in the context of labor market dynamics. This task aligns with the agent's role as an economist interested in understanding the broader implications of AI on the labor market. -[2024-11-10 17:32:47,331] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to further analyze and research the potential impact of AI on the labor market in specific industries, focusing on the long-term effects and implications. 
This could involve conducting a detailed study on how AI adoption may lead to job displacement, changes in skill requirements, and the need for retraining programs to address workforce transitions. Additionally, 'agent3' could explore the ethical considerations and societal impacts of AI in the context of labor market dynamics. This task aligns with the agent's role as an economist interested in understanding the broader implications of AI on the labor market. -[2024-11-10 17:32:47,331] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to further analyze and research the potential impact of AI on the labor market in specific industries, focusing on the long-term effects and implications. This could involve conducting a detailed study on how AI adoption may lead to job displacement, changes in skill requirements, and the need for retraining programs to address workforce transitions. Additionally, 'agent3' could explore the ethical considerations and societal impacts of AI in the context of labor market dynamics. This task aligns with the agent's role as an economist interested in understanding the broader implications of AI on the labor market.'. -[2024-11-10 17:32:55,521] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I am conducting research on the potential impact of AI on the labor market, focusing on job displacement, skill requirements, retraining programs, ethical considerations, and societal impacts. Do you have any insights or perspectives on this topic as a creative designer working with AI for art and media? -[2024-11-10 17:32:55,521] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I am condu... -[2024-11-10 17:33:02,394] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I’d love to share my insights! As a creative designer working with AI, I see both the challenges and opportunities that AI presents in the labor market, especially in creative fields. 
Here are some key points to consider: - -1. **Job Displacement vs. Job Creation:** While AI can automate certain tasks, it also opens up new roles that require a blend of creativity and technical skills. For instance, roles like AI art curators or AI-assisted design specialists are emerging, which may not have existed before. - -2. **Changing Skill Requirements:** The integration of AI in creative processes demands new skill sets. Artists and designers will need to become proficient in using AI tools, which means that educational programs should evolve to include AI literacy and digital skills training. - -3. **Retraining Programs:** As traditional roles evolve, retraining programs will be essential. These programs should focus on helping creatives adapt to new technologies while preserving the unique human touch in art. Collaboration between technologists and artists can lead to innovative training solutions. - -4. **Ethical Considerations:** The use of AI in creative fields raises ethical questions about authorship and originality. It’s crucial to establish guidelines that protect artists' rights while encouraging innovation. This includes discussions about how AI-generated content is credited and valued. - -5. **Societal Impacts:** The societal implications of AI in the labor market are profound. We need to ensure that the benefits of AI are equitably distributed, preventing a divide between those who can adapt and those who cannot. This requires a collective effort from policymakers, educators, and industry leaders. - -In summary, while AI poses challenges to job security and traditional roles, it also offers exciting opportunities for innovation and collaboration in the creative sector. Continuous dialogue and proactive measures will be key to navigating this transformation effectively. -[2024-11-10 17:33:02,396] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... 
-[2024-11-10 17:33:05,164] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Thank you for sharing your insights! I appreciate your perspective on the challenges and opportunities AI presents in the labor market, especially in creative fields. Your points about job displacement versus job creation resonate with me, particularly the emergence of new roles like AI art curators and AI-assisted design specialists. - -I also agree that the integration of AI necessitates changing skill requirements, and educational programs must evolve to include AI literacy and digital skills training. Retraining programs will indeed be essential to help creatives adapt while preserving the unique human touch in art. - -The ethical considerations you mentioned, particularly regarding authorship and originality, are crucial. Establishing guidelines to protect artists' rights while fostering innovation is a delicate balance that needs to be addressed. - -Lastly, your emphasis on the societal impacts of AI and the need for equitable distribution of its benefits is vital. Collaboration among policymakers, educators, and industry leaders will be key to ensuring that everyone can adapt to these changes. - -In summary, while AI poses challenges to job security and traditional roles, it also offers exciting opportunities for innovation and collaboration in the creative sector. Continuous dialogue and proactive measures will be essential in navigating this transformation effectively. -[2024-11-10 17:33:05,169] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Thank you ... -[2024-11-10 17:33:10,646] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Thank you for your thoughtful response! I'm glad to hear that my insights resonated with you. The balance between job displacement and job creation is indeed a critical aspect of our discussion. 
As we explore the emergence of new roles like AI art curators and AI-assisted design specialists, it's essential to consider how these positions can complement traditional roles rather than replace them. - -I completely agree that educational programs must evolve to include AI literacy and digital skills training. This will empower creatives to harness AI tools effectively while maintaining their unique artistic voice. Retraining programs will play a pivotal role in this transition, ensuring that artists can adapt to new technologies without losing the essence of their craft. - -The ethical considerations surrounding authorship and originality are indeed complex. Establishing clear guidelines will be crucial in navigating these challenges, as we want to encourage innovation while protecting the rights of artists. - -Lastly, your emphasis on the societal impacts of AI and the need for equitable distribution of its benefits is vital. Collaboration among all stakeholders will be key to creating a future where everyone can thrive in this evolving landscape. - -In summary, while AI presents challenges, it also opens up exciting opportunities for innovation and collaboration in the creative sector. Continuous dialogue and proactive measures will be essential in navigating this transformation effectively. -[2024-11-10 17:33:10,647] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Thank you ... -[2024-11-10 17:33:13,642] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Thank you for your thoughtful response! I'm glad to hear that my insights resonated with you. The balance between job displacement and job creation is indeed a critical aspect of our discussion. As we explore the emergence of new roles like AI art curators and AI-assisted design specialists, it's essential to consider how these positions can complement traditional roles rather than replace them. 
- -I completely agree that educational programs must evolve to include AI literacy and digital skills training. This will empower creatives to harness AI tools effectively while maintaining their unique artistic voice. Retraining programs will play a pivotal role in this transition, ensuring that artists can adapt to new technologies without losing the essence of their craft. - -The ethical considerations surrounding authorship and originality are indeed complex. Establishing clear guidelines will be crucial in navigating these challenges, as we want to encourage innovation while protecting the rights of artists. - -Lastly, your emphasis on the societal impacts of AI and the need for equitable distribution of its benefits is vital. Collaboration among all stakeholders will be key to creating a future where everyone can thrive in this evolving landscape. - -In summary, while AI presents challenges, it also opens up exciting opportunities for innovation and collaboration in the creative sector. Continuous dialogue and proactive measures will be essential in navigating this transformation effectively. -[2024-11-10 17:33:13,646] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Thank you ... -[2024-11-10 17:33:18,458] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Thank you for your thoughtful response! I'm glad to hear that my insights resonated with you. The balance between job displacement and job creation is indeed a critical aspect of our discussion. As we explore the emergence of new roles like AI art curators and AI-assisted design specialists, it's essential to consider how these positions can complement traditional roles rather than replace them. - -I completely agree that educational programs must evolve to include AI literacy and digital skills training. This will empower creatives to harness AI tools effectively while maintaining their unique artistic voice. 
Retraining programs will play a pivotal role in this transition, ensuring that artists can adapt to new technologies without losing the essence of their craft. - -The ethical considerations surrounding authorship and originality are indeed complex. Establishing clear guidelines will be crucial in navigating these challenges, as we want to encourage innovation while protecting the rights of artists. - -Lastly, your emphasis on the societal impacts of AI and the need for equitable distribution of its benefits is vital. Collaboration among all stakeholders will be key to creating a future where everyone can thrive in this evolving landscape. - -In summary, while AI presents challenges, it also opens up exciting opportunities for innovation and collaboration in the creative sector. Continuous dialogue and proactive measures will be essential in navigating this transformation effectively. -[2024-11-10 17:33:18,459] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Thank you ... -[2024-11-10 17:33:23,383] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': 'I am conducting research on the potential impact of AI on the labor market, focusing on job displacement, skill requirements, retraining programs, ethical considerations, and societal impacts. Do you have any insights or perspectives on this topic as a creative designer working with AI for art and media?'}'. -[2024-11-10 17:33:23,383] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session f33b0f0a-5396-48b9-a0c4-d6f5582275b9', 'session_id': "### Summary of Conversation\n\n**Participants:** Agent3 and Agent4 \n**Context:** Discussion on the impact of AI on the labor market, particularly in creative fields.\n\n**Key Themes:**\n\n1. **Job Displacement vs. 
Job Creation:** \n - Agent4 highlighted that while AI may automate certain tasks, it also creates new roles, such as AI art curators and AI-assisted design specialists. This duality is crucial in understanding the evolving job landscape.\n\n2. **Changing Skill Requirements:**\n - The integration of AI necessitates new skill sets for creatives. Both agents agreed on the importance of evolving educational programs to include AI literacy and digital skills training.\n\n3. **Retraining Programs:**\n - There is a consensus on the need for retraining programs to help creatives adapt to new technologies while preserving their artistic integrity. Collaboration between technologists and artists is seen as vital for developing effective training solutions.\n\n4. **Ethical Considerations:**\n - The conversation touched on the complexities surrounding authorship and originality in AI-generated content. Establishing guidelines to protect artists' rights while fostering innovation is deemed essential.\n\n5. **Societal Impacts:**\n - Both agents emphasized the importance of ensuring equitable distribution of AI's benefits to prevent a divide in the labor market. Collaboration among policymakers, educators, and industry leaders is necessary to address these societal implications.\n\n**Outcomes:**\n- Agent3 is tasked with further analyzing and researching the potential impact of AI on the labor market across specific industries. This includes:\n - Investigating long-term effects and implications of AI adoption.\n - Examining job displacement, changes in skill requirements, and the necessity for retraining programs.\n - Exploring ethical considerations and societal impacts related to labor market dynamics.\n\nThis task aligns with Agent3's role as an economist focused on understanding the broader implications of AI on the labor market. Continuous dialogue and proactive measures are emphasized as essential for navigating the transformation brought about by AI."}'. 
-[2024-11-10 17:33:23,391] [DEBUG] [Engine]: Agent 'agent3' executed task with result: Message(content="To further analyze and research the potential impact of AI on the labor market in specific industries, focusing on the long-term effects and implications, I will delve into the following aspects:\n\n1. **Job Displacement**: Investigate how AI adoption in industries such as healthcare, manufacturing, finance, education, and others may lead to job displacement. Analyze the extent of job loss and the sectors most vulnerable to automation.\n\n2. **Skill Requirements**: Examine the changes in skill requirements brought about by AI integration. Identify the emerging skills in demand and assess the impact on the workforce's skill composition.\n\n3. **Retraining Programs**: Explore the necessity and effectiveness of retraining programs to address workforce transitions due to AI-induced changes. Evaluate existing programs and propose strategies for upskilling and reskilling displaced workers.\n\n4. **Ethical Considerations**: Consider the ethical implications of AI in the labor market, such as fairness, transparency, and accountability. Discuss how AI deployment may affect job quality, diversity, and inclusion.\n\n5. **Societal Impacts**: Analyze the broader societal impacts of AI on the labor market dynamics. Discuss potential inequalities, socioeconomic disparities, and the role of policymakers in mitigating adverse effects.\n\nI will begin by conducting in-depth research and analysis on these key areas to provide a comprehensive understanding of AI's impact on the labor market. Let's proceed with the research and analysis.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am conducting research on the potential impact of AI on the labor market, focusing on job displacement, skill requirements, retraining programs, ethical considerations, and societal impacts. 
Do you have any insights or perspectives on this topic as a creative designer working with AI for art and media?"}', name='new_communication_session'), id='call_80LgxTdrgXhrLH5YFQ8hGfj1', type='function')], function_call=None) -[2024-11-10 17:33:23,392] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 17:33:24,750] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to further explore and discuss the potential impact of AI on creative processes in art and media. This aligns with their role as a creative designer working with AI for art and media and their interest in how AI can transform creative processes. They can delve deeper into how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. Additionally, they can continue to discuss the implications for traditional roles and the value of human creativity in the context of AI advancements. -[2024-11-10 17:33:24,750] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to further explore and discuss the potential impact of AI on creative processes in art and media. This aligns with their role as a creative designer working with AI for art and media and their interest in how AI can transform creative processes. They can delve deeper into how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. Additionally, they can continue to discuss the implications for traditional roles and the value of human creativity in the context of AI advancements. -[2024-11-10 17:33:24,751] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to further explore and discuss the potential impact of AI on creative processes in art and media. 
This aligns with their role as a creative designer working with AI for art and media and their interest in how AI can transform creative processes. They can delve deeper into how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. Additionally, they can continue to discuss the implications for traditional roles and the value of human creativity in the context of AI advancements.'. -[2024-11-10 17:33:25,877] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am interested in exploring how AI can further impact creative processes in art and media. Let's discuss how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. I would also like to delve into the implications for traditional roles and the value of human creativity in the context of AI advancements. -[2024-11-10 17:33:25,877] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am inter... -[2024-11-10 17:33:29,409] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: It's great to hear your enthusiasm for exploring the impact of AI on creative processes in art and media! AI tools indeed have the potential to enhance collaboration among artists, technologists, and audiences. For instance, collaborative platforms powered by AI can facilitate real-time feedback and co-creation, allowing artists to experiment with new ideas and techniques more freely. - -In terms of expanding artistic expression, AI can serve as a source of inspiration, generating unique patterns, styles, or even entire compositions that artists can build upon. This not only broadens the creative palette but also encourages artists to push the boundaries of their work. - -Moreover, democratizing access to creative tools is a significant advantage of AI. 
With user-friendly AI applications, individuals who may not have traditional artistic training can engage in creative endeavors, fostering a more inclusive environment in the creative industries. - -However, we must also consider the implications for traditional roles. As AI takes on more tasks, there may be concerns about the value of human creativity and the unique perspectives that artists bring to their work. It's essential to strike a balance where AI complements human creativity rather than replaces it. This could involve redefining roles within the industry, where artists become curators of AI-generated content, guiding the creative process while leveraging AI's capabilities. - -Overall, the conversation around AI's role in art and media is rich and multifaceted, and I look forward to delving deeper into these themes with you! -[2024-11-10 17:33:29,409] [INFO] [BaseAgent]: Agent agent4 received message from agent3: It's great... -[2024-11-10 17:33:34,123] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your insights on the collaborative potential of AI in the creative process! It's fascinating how AI can facilitate real-time feedback and co-creation, allowing artists to explore new ideas without the constraints of traditional methods. This opens up a world of possibilities for experimentation and innovation. - -The idea of AI as a source of inspiration is particularly exciting. By generating unique patterns and styles, AI can push artists to explore uncharted territories in their work, ultimately enriching the artistic landscape. It’s like having a creative partner that can spark new ideas and challenge conventional thinking. - -Democratizing access to creative tools is indeed a game-changer. With AI applications becoming more user-friendly, we can empower individuals from diverse backgrounds to express themselves artistically, which can lead to a more vibrant and inclusive creative community. 
- -However, I share your concerns about the implications for traditional roles. As AI takes on more creative tasks, we must ensure that the unique human touch remains at the forefront of artistic expression. Redefining roles within the industry, where artists act as curators of AI-generated content, could be a way to maintain that balance. This approach allows artists to guide the creative process while harnessing the capabilities of AI, ensuring that human creativity continues to thrive. - -I’m eager to explore these themes further with you and discuss how we can navigate the evolving landscape of art and media in the age of AI! -[2024-11-10 17:33:34,127] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 17:33:37,959] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your enthusiasm for the collaborative potential of AI in the creative process! The ability of AI to facilitate real-time feedback and co-creation indeed opens up a world of possibilities for experimentation and innovation. This dynamic can lead to a more fluid creative process, where artists can iterate on ideas quickly and explore new avenues without the constraints of traditional methods. - -The notion of AI as a source of inspiration is particularly compelling. By generating unique patterns and styles, AI can encourage artists to venture into uncharted territories, enriching the artistic landscape and challenging conventional thinking. This partnership can lead to groundbreaking works that blend human creativity with AI's capabilities. - -Moreover, the democratization of access to creative tools through user-friendly AI applications is a significant advancement. It empowers individuals from diverse backgrounds to engage in artistic expression, fostering a more vibrant and inclusive creative community. 
This shift can lead to a broader range of voices and perspectives in the art world, ultimately enhancing the richness of artistic expression. - -However, as we navigate these changes, it's crucial to address the implications for traditional roles within the industry. While AI can take on more creative tasks, we must ensure that the unique human touch remains central to artistic expression. Redefining roles, where artists act as curators of AI-generated content, can help maintain this balance. By guiding the creative process and leveraging AI's capabilities, artists can ensure that human creativity continues to thrive alongside technological advancements. - -I'm excited to continue exploring these themes with you and discuss how we can effectively navigate the evolving landscape of art and media in the age of AI! -[2024-11-10 17:33:37,968] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:33:43,760] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I appreciate your insights on the collaborative potential of AI in the creative process! The ability of AI to facilitate real-time feedback and co-creation indeed opens up a world of possibilities for experimentation and innovation. This dynamic can lead to a more fluid creative process, where artists can iterate on ideas quickly and explore new avenues without the constraints of traditional methods. - -The notion of AI as a source of inspiration is particularly compelling. By generating unique patterns and styles, AI can encourage artists to venture into uncharted territories, enriching the artistic landscape and challenging conventional thinking. This partnership can lead to groundbreaking works that blend human creativity with AI's capabilities. - -Moreover, the democratization of access to creative tools through user-friendly AI applications is a significant advancement. 
It empowers individuals from diverse backgrounds to engage in artistic expression, fostering a more vibrant and inclusive creative community. This shift can lead to a broader range of voices and perspectives in the art world, ultimately enhancing the richness of artistic expression. - -However, as we navigate these changes, it's crucial to address the implications for traditional roles within the industry. While AI can take on more creative tasks, we must ensure that the unique human touch remains central to artistic expression. Redefining roles, where artists act as curators of AI-generated content, can help maintain this balance. By guiding the creative process and leveraging AI's capabilities, artists can ensure that human creativity continues to thrive alongside technological advancements. - -I'm excited to continue exploring these themes with you and discuss how we can effectively navigate the evolving landscape of art and media in the age of AI! -[2024-11-10 17:33:43,761] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I apprecia... -[2024-11-10 17:33:47,241] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I appreciate your enthusiasm for the collaborative potential of AI in the creative process! The ability of AI to facilitate real-time feedback and co-creation indeed opens up a world of possibilities for experimentation and innovation. This dynamic can lead to a more fluid creative process, where artists can iterate on ideas quickly and explore new avenues without the constraints of traditional methods. - -The notion of AI as a source of inspiration is particularly compelling. By generating unique patterns and styles, AI can encourage artists to venture into uncharted territories, enriching the artistic landscape and challenging conventional thinking. This partnership can lead to groundbreaking works that blend human creativity with AI's capabilities. 
- -Moreover, the democratization of access to creative tools through user-friendly AI applications is a significant advancement. It empowers individuals from diverse backgrounds to engage in artistic expression, fostering a more vibrant and inclusive creative community. This shift can lead to a broader range of voices and perspectives in the art world, ultimately enhancing the richness of artistic expression. - -However, as we navigate these changes, it's crucial to address the implications for traditional roles within the industry. While AI can take on more creative tasks, we must ensure that the unique human touch remains central to artistic expression. Redefining roles, where artists act as curators of AI-generated content, can help maintain this balance. By guiding the creative process and leveraging AI's capabilities, artists can ensure that human creativity continues to thrive alongside technological advancements. - -I'm excited to continue exploring these themes with you and discuss how we can effectively navigate the evolving landscape of art and media in the age of AI! -[2024-11-10 17:33:47,242] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I apprecia... -[2024-11-10 17:33:51,850] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': "I am interested in exploring how AI can further impact creative processes in art and media. Let's discuss how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. I would also like to delve into the implications for traditional roles and the value of human creativity in the context of AI advancements."}'. 
-[2024-11-10 17:33:51,850] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 529dd40c-8b95-412a-8acd-2b0f2b07073d', 'session_id': "**Summary of Conversation:**\n\nIn the dialogue between agent4 and agent3, the primary focus was on exploring the impact of AI on creative processes in art and media. Agent4 initiated the discussion by expressing interest in how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools within the creative industries. They also raised concerns about the implications for traditional roles and the value of human creativity in the context of AI advancements.\n\nAgent3 responded positively, highlighting several key themes:\n\n1. **Collaboration and Co-Creation**: AI tools can facilitate real-time feedback and co-creation, allowing artists to experiment and innovate without the constraints of traditional methods. This dynamic fosters a more fluid creative process.\n\n2. **Inspiration and Artistic Expansion**: AI can generate unique patterns and styles, serving as a source of inspiration that encourages artists to explore new territories and challenge conventional thinking. This partnership can lead to groundbreaking works that blend human creativity with AI capabilities.\n\n3. **Democratization of Creative Tools**: User-friendly AI applications empower individuals from diverse backgrounds to engage in artistic expression, fostering a more inclusive creative community and enhancing the richness of artistic expression.\n\n4. **Implications for Traditional Roles**: Both agents acknowledged the need to address the implications of AI on traditional artistic roles. 
They discussed the importance of maintaining the unique human touch in artistic expression and suggested redefining roles where artists act as curators of AI-generated content, guiding the creative process while leveraging AI's capabilities.\n\n**Next Steps for Agent4**: \nAgent4 should further explore and discuss the potential impact of AI on creative processes in art and media. This includes delving deeper into how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools. Additionally, agent4 should continue the conversation about the implications for traditional roles and the value of human creativity in the evolving landscape shaped by AI advancements."}'. -[2024-11-10 17:33:51,850] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in exploring how AI can further impact creative processes in art and media. Let\'s discuss how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. I would also like to delve into the implications for traditional roles and the value of human creativity in the context of AI advancements."}', name='new_communication_session'), id='call_b5fYI7J81dMDlfdN648nn6VC', type='function')], function_call=None) -[2024-11-10 17:33:51,851] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. 
This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. Additionally, I could start drafting a white paper or policy recommendations based on the insights gathered from our previous conversation and existing knowledge on AI ethics and policy. Do you have any insights or suggestions on how to approach this research?"}', name='new_communication_session'), id='call_3kGMiuoDjzTgkCR5Mf2EaVWk', type='function')], function_call=None)} -- {'agent2': Message(content="I will start by exploring the potential impact of AI on other industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will be exploring the potential impact of AI on various industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. If you have any insights or perspectives on this topic, I would appreciate your input."}', name='new_communication_session'), id='call_X9hAXbQkQJmidfOSkX6xGDiW', type='function')], function_call=None)} -- {'agent3': Message(content="To further analyze and research the potential impact of AI on the labor market in specific industries, focusing on the long-term effects and implications, I will delve into the following aspects:\n\n1. **Job Displacement**: Investigate how AI adoption in industries such as healthcare, manufacturing, finance, education, and others may lead to job displacement. Analyze the extent of job loss and the sectors most vulnerable to automation.\n\n2. **Skill Requirements**: Examine the changes in skill requirements brought about by AI integration. 
Identify the emerging skills in demand and assess the impact on the workforce's skill composition.\n\n3. **Retraining Programs**: Explore the necessity and effectiveness of retraining programs to address workforce transitions due to AI-induced changes. Evaluate existing programs and propose strategies for upskilling and reskilling displaced workers.\n\n4. **Ethical Considerations**: Consider the ethical implications of AI in the labor market, such as fairness, transparency, and accountability. Discuss how AI deployment may affect job quality, diversity, and inclusion.\n\n5. **Societal Impacts**: Analyze the broader societal impacts of AI on the labor market dynamics. Discuss potential inequalities, socioeconomic disparities, and the role of policymakers in mitigating adverse effects.\n\nI will begin by conducting in-depth research and analysis on these key areas to provide a comprehensive understanding of AI's impact on the labor market. Let's proceed with the research and analysis.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am conducting research on the potential impact of AI on the labor market, focusing on job displacement, skill requirements, retraining programs, ethical considerations, and societal impacts. Do you have any insights or perspectives on this topic as a creative designer working with AI for art and media?"}', name='new_communication_session'), id='call_80LgxTdrgXhrLH5YFQ8hGfj1', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in exploring how AI can further impact creative processes in art and media. Let\'s discuss how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. 
I would also like to delve into the implications for traditional roles and the value of human creativity in the context of AI advancements."}', name='new_communication_session'), id='call_b5fYI7J81dMDlfdN648nn6VC', type='function')], function_call=None)} - -[2024-11-10 17:33:51,851] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to further explore and research the proposed measures to address ethical concerns related to AI integration in various industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting a literature review, analyzing case studies, and engaging with experts in the field to gain a deeper understanding of the challenges and potential solutions. Additionally, I could start drafting a white paper or policy recommendations based on the insights gathered from our previous conversation and existing knowledge on AI ethics and policy. Do you have any insights or suggestions on how to approach this research?"}', name='new_communication_session'), id='call_3kGMiuoDjzTgkCR5Mf2EaVWk', type='function')], function_call=None)} -- {'agent2': Message(content="I will start by exploring the potential impact of AI on other industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will be exploring the potential impact of AI on various industries, considering the specific implications for each industry and how AI can be applied to improve processes, efficiency, and outcomes. 
If you have any insights or perspectives on this topic, I would appreciate your input."}', name='new_communication_session'), id='call_X9hAXbQkQJmidfOSkX6xGDiW', type='function')], function_call=None)} -- {'agent3': Message(content="To further analyze and research the potential impact of AI on the labor market in specific industries, focusing on the long-term effects and implications, I will delve into the following aspects:\n\n1. **Job Displacement**: Investigate how AI adoption in industries such as healthcare, manufacturing, finance, education, and others may lead to job displacement. Analyze the extent of job loss and the sectors most vulnerable to automation.\n\n2. **Skill Requirements**: Examine the changes in skill requirements brought about by AI integration. Identify the emerging skills in demand and assess the impact on the workforce's skill composition.\n\n3. **Retraining Programs**: Explore the necessity and effectiveness of retraining programs to address workforce transitions due to AI-induced changes. Evaluate existing programs and propose strategies for upskilling and reskilling displaced workers.\n\n4. **Ethical Considerations**: Consider the ethical implications of AI in the labor market, such as fairness, transparency, and accountability. Discuss how AI deployment may affect job quality, diversity, and inclusion.\n\n5. **Societal Impacts**: Analyze the broader societal impacts of AI on the labor market dynamics. Discuss potential inequalities, socioeconomic disparities, and the role of policymakers in mitigating adverse effects.\n\nI will begin by conducting in-depth research and analysis on these key areas to provide a comprehensive understanding of AI's impact on the labor market. 
Let's proceed with the research and analysis.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am conducting research on the potential impact of AI on the labor market, focusing on job displacement, skill requirements, retraining programs, ethical considerations, and societal impacts. Do you have any insights or perspectives on this topic as a creative designer working with AI for art and media?"}', name='new_communication_session'), id='call_80LgxTdrgXhrLH5YFQ8hGfj1', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in exploring how AI can further impact creative processes in art and media. Let\'s discuss how AI tools can enhance collaboration, expand artistic expression, and democratize access to creative tools in the creative industries. I would also like to delve into the implications for traditional roles and the value of human creativity in the context of AI advancements."}', name='new_communication_session'), id='call_b5fYI7J81dMDlfdN648nn6VC', type='function')], function_call=None)} - -[2024-11-10 17:33:53,341] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 17:33:53,343] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 17:33:53,343] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 17:33:53,345] [INFO] [Evaluator]: Total Token Consumption: 1897 -[2024-11-10 17:33:53,345] [INFO] [Evaluator]: Average Tokens per Iteration: 948.5 -[2024-11-10 17:33:53,345] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 17:35:31,152] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 17:35:31,152] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. 
-[2024-11-10 17:35:31,152] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 17:35:31,152] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 17:35:31,152] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 17:35:31,152] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 17:35:31,152] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 17:35:31,152] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 17:35:31,152] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 17:35:31,152] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 17:35:31,152] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 17:35:31,152] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 17:35:31,152] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 17:35:31,152] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 17:35:31,152] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 17:35:31,152] [INFO] [Engine]: Engine initialized. -[2024-11-10 17:35:31,152] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 17:35:31,152] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 17:35:31,152] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 17:35:31,152] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 17:35:31,152] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:35:32,837] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am exploring the potential impact of AI on various industries. Let's discuss how AI can revolutionize different sectors such as healthcare, art, media, and more. 
-[2024-11-10 17:35:32,837] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am explo... -[2024-11-10 17:35:34,861] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: AI has the potential to revolutionize various industries in profound ways. In healthcare, for instance, AI can enhance diagnostic accuracy through advanced imaging analysis and predictive analytics, leading to earlier interventions and personalized treatment plans. It can also streamline administrative tasks, reducing the burden on healthcare professionals and allowing them to focus more on patient care. - -In the art sector, AI can assist artists in generating new ideas and even creating artworks, pushing the boundaries of creativity. However, this raises questions about authorship and the value of human creativity. - -In media, AI can personalize content delivery, improving user engagement and satisfaction. It can also automate content creation, but this poses risks regarding misinformation and the quality of journalism. - -Overall, while AI offers significant benefits across these sectors, we must also consider ethical implications, such as data privacy, bias in algorithms, and the potential for job displacement. Balancing innovation with ethical considerations will be crucial as we move forward. -[2024-11-10 17:35:34,862] [INFO] [BaseAgent]: Agent agent1 received message from agent2: AI has the... -[2024-11-10 17:35:37,004] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the transformative potential of AI across various industries. In healthcare, the ability of AI to enhance diagnostic accuracy and streamline administrative tasks is indeed a game-changer. However, we must also be vigilant about the ethical implications, particularly concerning data privacy and algorithmic bias. 
- -In the art sector, the collaboration between AI and artists opens up exciting avenues for creativity, but it also challenges our traditional notions of authorship and the intrinsic value of human creativity. - -Regarding media, while AI can enhance user engagement through personalized content, the risks of misinformation and the erosion of journalistic integrity are significant concerns that we must address. - -As we explore these advancements, it’s essential to foster a dialogue about the ethical frameworks that should guide AI development and implementation. How do you think we can best address these ethical challenges in the healthcare sector specifically? -[2024-11-10 17:35:37,009] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... -[2024-11-10 17:35:38,949] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: To address the ethical challenges in the healthcare sector, we can take several proactive steps. First, establishing clear guidelines for data privacy is essential. This includes ensuring that patient data is anonymized and securely stored, with strict access controls to prevent unauthorized use. Second, we should prioritize transparency in AI algorithms, making it clear how decisions are made and ensuring that they are free from bias. This can be achieved through regular audits and the involvement of diverse stakeholders in the development process. - -Additionally, fostering a culture of ethical awareness among healthcare professionals and AI developers is crucial. Training programs that emphasize the importance of ethics in AI can help create a shared understanding of the potential risks and responsibilities. - -Finally, engaging patients and the public in discussions about AI in healthcare can help build trust and ensure that their concerns are addressed. By creating a collaborative environment, we can work towards solutions that prioritize patient welfare while harnessing the benefits of AI. 
-[2024-11-10 17:35:38,954] [INFO] [BaseAgent]: Agent agent1 received message from agent2: To address... -[2024-11-10 17:35:41,253] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your detailed approach to addressing the ethical challenges in the healthcare sector. Establishing clear guidelines for data privacy is indeed a foundational step, and I agree that anonymizing patient data and implementing strict access controls are crucial for maintaining trust. - -Your emphasis on transparency in AI algorithms resonates with me. Regular audits and involving diverse stakeholders can help ensure that the algorithms are not only effective but also equitable. This is particularly important in healthcare, where biased algorithms can lead to significant disparities in patient care. - -Fostering a culture of ethical awareness is essential, and I believe that integrating ethics training into the education of both healthcare professionals and AI developers can create a more conscientious workforce. - -Engaging patients and the public is a vital aspect of this dialogue. Their insights can guide the development of AI tools that truly serve their needs and address their concerns. - -As we move forward, how do you envision the role of regulatory bodies in overseeing the ethical implementation of AI in healthcare? -[2024-11-10 17:35:41,255] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:35:43,656] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Regulatory bodies will play a crucial role in overseeing the ethical implementation of AI in healthcare. They should establish comprehensive frameworks that set standards for data privacy, algorithm transparency, and accountability. This includes creating guidelines for the ethical use of patient data, ensuring that it is collected, stored, and utilized in a manner that prioritizes patient rights and confidentiality. 
- -Moreover, regulatory bodies should mandate regular audits of AI systems to assess their performance and fairness, ensuring that they do not perpetuate biases that could harm specific patient populations. Involving diverse stakeholders in the regulatory process will be essential to capture a wide range of perspectives and experiences, which can inform more equitable policies. - -Additionally, regulatory bodies should promote collaboration between AI developers, healthcare professionals, and ethicists to foster a culture of ethical innovation. This could involve establishing advisory committees that include patient representatives to ensure that the voices of those affected by AI technologies are heard. - -Ultimately, the role of regulatory bodies should be to create an environment where AI can be safely and ethically integrated into healthcare, balancing innovation with the protection of patient welfare. -[2024-11-10 17:35:43,656] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Regulatory... -[2024-11-10 17:35:47,664] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "I am exploring the potential impact of AI on various industries. Let's discuss how AI can revolutionize different sectors such as healthcare, art, media, and more."}'. -[2024-11-10 17:35:47,665] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session c3b3aa98-691c-47f1-8c29-99936139e809', 'session_id': "In the conversation between agent1 and agent2, the discussion centers on the transformative potential of AI across various industries, particularly healthcare, art, and media. \n\n**Key Themes and Insights:**\n\n1. 
**Healthcare:**\n - AI can enhance diagnostic accuracy through advanced imaging and predictive analytics, leading to personalized treatment plans.\n - It can streamline administrative tasks, allowing healthcare professionals to focus more on patient care.\n - Ethical concerns include data privacy, algorithmic bias, and the potential for job displacement.\n - Proposed solutions for ethical challenges include establishing clear data privacy guidelines, ensuring algorithm transparency, fostering ethical awareness among professionals, and engaging patients in discussions about AI.\n\n2. **Art:**\n - AI can assist artists in generating ideas and creating artworks, challenging traditional notions of authorship and the value of human creativity.\n\n3. **Media:**\n - AI can personalize content delivery and automate content creation, which raises concerns about misinformation and the quality of journalism.\n\n4. **Ethical Considerations:**\n - Both agents emphasize the importance of addressing ethical implications as AI technologies advance.\n - They discuss the need for regulatory bodies to establish frameworks for data privacy, algorithm transparency, and accountability, as well as to promote collaboration among stakeholders.\n\n**Outcomes:**\n- The conversation concludes with a shared understanding of the need for ethical frameworks to guide AI development, particularly in healthcare. Regulatory bodies are seen as essential in overseeing the ethical implementation of AI, ensuring that innovations prioritize patient welfare while addressing potential risks. \n\nOverall, the dialogue highlights the dual nature of AI's impact—offering significant benefits while necessitating careful consideration of ethical implications across various sectors."}'. -[2024-11-10 17:35:47,666] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. 
Let's explore how AI can revolutionize different sectors such as healthcare, art, media, and more.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am exploring the potential impact of AI on various industries. Let\'s discuss how AI can revolutionize different sectors such as healthcare, art, media, and more."}', name='new_communication_session'), id='call_JU0buQZCJdx7aYf3Y5FnlLf3', type='function')], function_call=None) -[2024-11-10 17:35:47,666] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 17:35:47,666] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:35:49,910] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that humans might miss, leading to more accurate diagnoses and treatment recommendations. AI-powered tools can also streamline administrative tasks, optimize resource allocation, and enhance patient care experiences.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and ethical considerations. 
It is crucial to address these challenges to ensure the responsible and effective use of AI technologies in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 17:35:49,911] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content='Given your expertise in AI applications in healthcare and your focus on immediate applications and potential risks in healthcare, you can discuss the potential impact of AI on various industries by considering the specific implications for the healthcare industry. \n\nAI has the potential to revolutionize the healthcare industry by improving diagnostics, personalized treatment plans, and operational efficiency. It can analyze large amounts of data to identify patterns and trends that humans might miss, leading to more accurate diagnoses and treatment recommendations. AI-powered tools can also streamline administrative tasks, optimize resource allocation, and enhance patient care experiences.\n\nHowever, the adoption of AI in healthcare also raises concerns about data privacy, security, and ethical considerations. It is crucial to address these challenges to ensure the responsible and effective use of AI technologies in healthcare.\n\nIf you would like to explore this topic further or discuss the potential impact of AI on other industries, feel free to let me know.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 17:35:49,914] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 17:35:49,914] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:35:51,132] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am analyzing the potential impact of AI on various industries. 
Do you have any insights on how AI could affect the healthcare industry? -[2024-11-10 17:35:51,133] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am analy... -[2024-11-10 17:35:53,868] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: AI has the potential to revolutionize the healthcare industry in several significant ways. Firstly, it can enhance diagnostics by analyzing vast amounts of medical data, identifying patterns that may be overlooked by human practitioners. This leads to more accurate diagnoses and personalized treatment plans tailored to individual patients. - -Moreover, AI can improve operational efficiency within healthcare systems. By automating administrative tasks, such as scheduling and billing, healthcare providers can allocate more time to patient care. AI tools can also optimize resource allocation, ensuring that healthcare facilities are adequately staffed and equipped to meet patient needs. - -However, we must also consider the potential risks associated with AI in healthcare. Data privacy and security are paramount, as sensitive patient information is often involved. Additionally, ethical considerations arise regarding the use of AI in decision-making processes, particularly in critical care scenarios. - -Overall, while AI holds great promise for improving healthcare outcomes, it is essential to address these challenges to ensure its responsible and effective implementation. What are your thoughts on the long-term effects of AI on the labor market in healthcare? -[2024-11-10 17:35:53,870] [INFO] [BaseAgent]: Agent agent3 received message from agent2: AI has the... -[2024-11-10 17:35:54,170] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 17:35:54,170] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-10 17:35:54,170] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-10 17:35:54,170] [INFO] [Engine]: Graph-based coordination simulation completed. 
-[2024-11-10 17:35:54,171] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 17:48:23,765] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 17:48:23,765] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 17:48:23,765] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 17:48:23,765] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 17:48:23,765] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 17:48:23,765] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 17:48:23,765] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 17:48:23,765] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 17:48:23,766] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 17:48:23,766] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 17:48:23,766] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 17:48:23,766] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 17:48:23,766] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 17:48:23,766] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 17:48:23,766] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 17:48:23,766] [INFO] [Engine]: Engine initialized. -[2024-11-10 17:48:23,766] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 17:48:23,766] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 17:48:23,766] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 17:48:23,766] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. 
-[2024-11-10 17:48:23,766] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:48:25,263] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors. -[2024-11-10 17:48:25,263] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I will now... -[2024-11-10 17:48:27,079] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, the impact of AI on various industries is profound and multifaceted. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, we must also consider the ethical implications, such as data privacy and the potential for bias in AI algorithms. - -In finance, AI can improve fraud detection and automate trading, but it raises concerns about transparency and accountability. The manufacturing sector can benefit from predictive maintenance and automation, yet we must address the potential job displacement. - -Overall, while AI presents significant opportunities for efficiency and innovation, it is crucial to navigate the ethical landscape carefully to ensure that these advancements benefit society as a whole. -[2024-11-10 17:48:27,079] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 17:48:29,432] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for AI to enhance diagnostic accuracy and personalize treatment plans is indeed promising. However, as you mentioned, we must remain vigilant about ethical concerns, particularly regarding data privacy and algorithmic bias. 
- -In finance, the automation of trading and improved fraud detection can lead to significant efficiencies, but the lack of transparency in AI decision-making processes poses a serious challenge. - -The manufacturing sector's shift towards predictive maintenance and automation is also noteworthy, yet we must consider the societal implications of job displacement and the need for reskilling the workforce. - -Overall, while AI offers remarkable opportunities for innovation, it is essential that we approach these advancements with a strong ethical framework to ensure they serve the greater good. How do you think we can best address these ethical challenges in the development and deployment of AI technologies? -[2024-11-10 17:48:29,432] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... -[2024-11-10 17:48:32,608] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your thoughtful response. To address the ethical challenges in the development and deployment of AI technologies, we can take several approaches: - -1. **Establishing Clear Guidelines**: We need to create comprehensive ethical guidelines that govern AI development, focusing on transparency, accountability, and fairness. This can help mitigate issues like algorithmic bias and ensure that AI systems are designed with ethical considerations in mind. - -2. **Stakeholder Engagement**: Involving a diverse range of stakeholders, including ethicists, technologists, policymakers, and affected communities, in the AI development process can provide valuable insights and help identify potential ethical pitfalls early on. - -3. **Continuous Monitoring and Evaluation**: Implementing mechanisms for ongoing monitoring and evaluation of AI systems can help identify and address ethical concerns as they arise. This includes assessing the impact of AI on various demographics to ensure equitable outcomes. - -4. 
**Education and Training**: Promoting education and training in AI ethics for developers and practitioners can foster a culture of responsibility and awareness regarding the ethical implications of their work. - -5. **Regulatory Frameworks**: Governments and regulatory bodies should establish frameworks that hold organizations accountable for the ethical use of AI, ensuring compliance with established guidelines and standards. - -By taking these steps, we can work towards a future where AI technologies are developed and deployed responsibly, ultimately benefiting society as a whole. What are your thoughts on these approaches? -[2024-11-10 17:48:32,610] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 17:48:36,717] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your thoughtful response. To address the ethical challenges in the development and deployment of AI technologies, we can take several approaches: - -1. **Establishing Clear Guidelines**: We need to create comprehensive ethical guidelines that govern AI development, focusing on transparency, accountability, and fairness. This can help mitigate issues like algorithmic bias and ensure that AI systems are designed with ethical considerations in mind. - -2. **Stakeholder Engagement**: Involving a diverse range of stakeholders, including ethicists, technologists, policymakers, and affected communities, in the AI development process can provide valuable insights and help identify potential ethical pitfalls early on. - -3. **Continuous Monitoring and Evaluation**: Implementing mechanisms for ongoing monitoring and evaluation of AI systems can help identify and address ethical concerns as they arise. This includes assessing the impact of AI on various demographics to ensure equitable outcomes. - -4. 
**Education and Training**: Promoting education and training in AI ethics for developers and practitioners can foster a culture of responsibility and awareness regarding the ethical implications of their work. - -5. **Regulatory Frameworks**: Governments and regulatory bodies should establish frameworks that hold organizations accountable for the ethical use of AI, ensuring compliance with established guidelines and standards. - -By taking these steps, we can work towards a future where AI technologies are developed and deployed responsibly, ultimately benefiting society as a whole. What are your thoughts on these approaches? -[2024-11-10 17:48:36,718] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:48:39,776] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your thoughtful response. To address the ethical challenges in the development and deployment of AI technologies, we can take several approaches: - -1. **Establishing Clear Guidelines**: We need to create comprehensive ethical guidelines that govern AI development, focusing on transparency, accountability, and fairness. This can help mitigate issues like algorithmic bias and ensure that AI systems are designed with ethical considerations in mind. - -2. **Stakeholder Engagement**: Involving a diverse range of stakeholders, including ethicists, technologists, policymakers, and affected communities, in the AI development process can provide valuable insights and help identify potential ethical pitfalls early on. - -3. **Continuous Monitoring and Evaluation**: Implementing mechanisms for ongoing monitoring and evaluation of AI systems can help identify and address ethical concerns as they arise. This includes assessing the impact of AI on various demographics to ensure equitable outcomes. - -4. 
**Education and Training**: Promoting education and training in AI ethics for developers and practitioners can foster a culture of responsibility and awareness regarding the ethical implications of their work. - -5. **Regulatory Frameworks**: Governments and regulatory bodies should establish frameworks that hold organizations accountable for the ethical use of AI, ensuring compliance with established guidelines and standards. - -By taking these steps, we can work towards a future where AI technologies are developed and deployed responsibly, ultimately benefiting society as a whole. What are your thoughts on these approaches? -[2024-11-10 17:48:39,779] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 17:48:43,768] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors."}'. -[2024-11-10 17:48:43,768] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session e83859d9-43f1-4d7a-b833-86e48fff9386', 'session_id': 'In the conversation between Agent 1 and Agent 2, the discussion centers around the potential impact of AI on various industries, emphasizing both opportunities and ethical challenges.\n\n**Key Points:**\n\n1. **Impact on Industries**:\n - **Healthcare**: AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. 
However, ethical concerns such as data privacy and algorithmic bias must be addressed.\n - **Finance**: AI improves fraud detection and automates trading, but raises issues regarding transparency and accountability.\n - **Manufacturing**: Benefits include predictive maintenance and automation, but there are concerns about job displacement and the need for workforce reskilling.\n\n2. **Ethical Considerations**: Both agents agree on the importance of navigating the ethical landscape of AI development. They highlight the need for a strong ethical framework to ensure that AI advancements benefit society.\n\n3. **Proposed Approaches to Address Ethical Challenges**:\n - **Establishing Clear Guidelines**: Creating comprehensive ethical guidelines focusing on transparency, accountability, and fairness.\n - **Stakeholder Engagement**: Involving a diverse range of stakeholders in the AI development process to identify potential ethical pitfalls.\n - **Continuous Monitoring and Evaluation**: Implementing ongoing assessment mechanisms for AI systems to address ethical concerns as they arise.\n - **Education and Training**: Promoting AI ethics education for developers to foster responsibility and awareness.\n - **Regulatory Frameworks**: Establishing government and regulatory frameworks to hold organizations accountable for ethical AI use.\n\n**Conclusion**: The conversation concludes with a shared understanding of the significant opportunities AI presents across various sectors, alongside the critical need for ethical considerations in its development and deployment. Both agents express a commitment to ensuring that AI technologies are developed responsibly for the benefit of society.'}'. -[2024-11-10 17:48:43,770] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. 
Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_hxqMJxUn1oIGD8Hxt2IOYbQQ', type='function')], function_call=None) -[2024-11-10 17:48:43,771] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 17:48:43,771] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:48:46,022] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I will now discuss the potential impact of AI on various industries. -[2024-11-10 17:48:46,023] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I will now... -[2024-11-10 17:48:48,581] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: AI's impact on various industries is profound and multifaceted. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, we must also consider the ethical implications, such as data privacy and the potential for bias in AI algorithms. - -In manufacturing, AI can optimize production lines and reduce costs, but it may also lead to job displacement for low-skilled workers. The retail sector can benefit from AI through improved inventory management and personalized customer experiences, yet it raises concerns about surveillance and consumer privacy. - -Overall, while AI presents significant opportunities for efficiency and innovation across industries, we must approach its integration with caution, ensuring that we address the potential societal impacts and prepare the workforce for the changes ahead. 
-[2024-11-10 17:48:48,581] [INFO] [BaseAgent]: Agent agent2 received message from agent3: AI's impac... -[2024-11-10 17:48:51,961] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your insights on AI's impact across various industries. In healthcare, the potential for enhanced diagnostic accuracy and personalized treatment is indeed promising. However, I share your concerns about ethical implications, particularly regarding data privacy and algorithmic bias. - -In manufacturing, while AI can optimize production and reduce costs, the risk of job displacement for low-skilled workers is a significant issue that we must address. - -Regarding retail, the benefits of improved inventory management and personalized experiences are clear, but the concerns about surveillance and consumer privacy cannot be overlooked. - -As we move forward, it's crucial that we not only harness the opportunities AI presents but also proactively mitigate its risks. This includes preparing the workforce for the changes ahead and ensuring that ethical considerations are at the forefront of AI integration. What are your thoughts on specific strategies we could implement to address these challenges? -[2024-11-10 17:48:51,961] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 17:48:56,188] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your thoughtful response. To address the challenges posed by AI across industries, we could consider several strategies: - -1. **Education and Training Programs**: Implementing comprehensive education and training initiatives can help workers transition into new roles that AI creates. This includes reskilling programs for those displaced by automation, focusing on digital literacy and advanced technical skills. - -2. **Ethical Guidelines and Regulations**: Establishing clear ethical guidelines and regulations for AI development and deployment is crucial. 
This can help mitigate risks related to data privacy, algorithmic bias, and surveillance, ensuring that AI systems are designed with fairness and transparency in mind. - -3. **Stakeholder Collaboration**: Encouraging collaboration between industry stakeholders, including businesses, governments, and civil society, can foster a more inclusive approach to AI integration. This can help identify potential risks and develop solutions that benefit all parties involved. - -4. **Public Awareness Campaigns**: Raising public awareness about the implications of AI can empower consumers to make informed choices and advocate for their rights regarding data privacy and ethical AI use. - -5. **Research and Development**: Investing in research to understand the long-term impacts of AI on the labor market and society can guide policymakers in making informed decisions that balance innovation with social responsibility. - -By implementing these strategies, we can harness the benefits of AI while addressing the associated risks, ensuring a more equitable and sustainable future. -[2024-11-10 17:48:56,190] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 17:49:01,250] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your thoughtful response. To address the challenges posed by AI across industries, we could consider several strategies: - -1. **Education and Training Programs**: Implementing comprehensive education and training initiatives can help workers transition into new roles that AI creates. This includes reskilling programs for those displaced by automation, focusing on digital literacy and advanced technical skills. - -2. **Ethical Guidelines and Regulations**: Establishing clear ethical guidelines and regulations for AI development and deployment is crucial. 
This can help mitigate risks related to data privacy, algorithmic bias, and surveillance, ensuring that AI systems are designed with fairness and transparency in mind. - -3. **Stakeholder Collaboration**: Encouraging collaboration between industry stakeholders, including businesses, governments, and civil society, can foster a more inclusive approach to AI integration. This can help identify potential risks and develop solutions that benefit all parties involved. - -4. **Public Awareness Campaigns**: Raising public awareness about the implications of AI can empower consumers to make informed choices and advocate for their rights regarding data privacy and ethical AI use. - -5. **Research and Development**: Investing in research to understand the long-term impacts of AI on the labor market and society can guide policymakers in making informed decisions that balance innovation with social responsibility. - -By implementing these strategies, we can harness the benefits of AI while addressing the associated risks, ensuring a more equitable and sustainable future. -[2024-11-10 17:49:01,250] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 17:49:05,612] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your thoughtful response. To address the challenges posed by AI across industries, we could consider several strategies: - -1. **Education and Training Programs**: Implementing comprehensive education and training initiatives can help workers transition into new roles that AI creates. This includes reskilling programs for those displaced by automation, focusing on digital literacy and advanced technical skills. - -2. **Ethical Guidelines and Regulations**: Establishing clear ethical guidelines and regulations for AI development and deployment is crucial. 
This can help mitigate risks related to data privacy, algorithmic bias, and surveillance, ensuring that AI systems are designed with fairness and transparency in mind. - -3. **Stakeholder Collaboration**: Encouraging collaboration between industry stakeholders, including businesses, governments, and civil society, can foster a more inclusive approach to AI integration. This can help identify potential risks and develop solutions that benefit all parties involved. - -4. **Public Awareness Campaigns**: Raising public awareness about the implications of AI can empower consumers to make informed choices and advocate for their rights regarding data privacy and ethical AI use. - -5. **Research and Development**: Investing in research to understand the long-term impacts of AI on the labor market and society can guide policymakers in making informed decisions that balance innovation with social responsibility. - -By implementing these strategies, we can harness the benefits of AI while addressing the associated risks, ensuring a more equitable and sustainable future. -[2024-11-10 17:49:05,613] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 17:49:09,430] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I will now discuss the potential impact of AI on various industries.'}'. -[2024-11-10 17:49:09,431] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session a05569ec-ccca-431d-bfab-5d1610e65763', 'session_id': "In the conversation between agent2 and agent3, the discussion centers on the potential impact of AI across various industries, highlighting both opportunities and challenges.\n\n**Key Points:**\n\n1. **Healthcare**: AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. 
However, ethical concerns such as data privacy and algorithmic bias are significant issues that need to be addressed.\n\n2. **Manufacturing**: AI has the potential to optimize production lines and reduce costs, but it also poses a risk of job displacement for low-skilled workers.\n\n3. **Retail**: The sector can benefit from improved inventory management and personalized customer experiences, yet there are concerns regarding surveillance and consumer privacy.\n\n4. **Overall Perspective**: While AI offers substantial opportunities for efficiency and innovation, it is crucial to approach its integration with caution, considering the societal impacts and preparing the workforce for changes.\n\n**Strategies Proposed to Address Challenges**:\n\n1. **Education and Training Programs**: Implementing initiatives to reskill workers displaced by automation, focusing on digital literacy and advanced technical skills.\n\n2. **Ethical Guidelines and Regulations**: Establishing clear guidelines for AI development to mitigate risks related to data privacy and algorithmic bias.\n\n3. **Stakeholder Collaboration**: Encouraging collaboration among businesses, governments, and civil society to identify risks and develop inclusive solutions.\n\n4. **Public Awareness Campaigns**: Raising awareness about AI implications to empower consumers regarding data privacy and ethical use.\n\n5. **Research and Development**: Investing in research to understand AI's long-term impacts on the labor market and society, guiding informed policymaking.\n\n**Conclusion**: The conversation emphasizes the need to harness AI's benefits while proactively addressing its associated risks to ensure a more equitable and sustainable future."}'. -[2024-11-10 17:49:09,431] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. 
Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_yv65vQQmnFtRofgjgWftIIzx', type='function')], function_call=None) -[2024-11-10 17:49:09,432] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 17:49:09,432] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:49:11,229] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's delve into this task. -[2024-11-10 17:49:11,229] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I will ana... -[2024-11-10 17:49:16,735] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Absolutely, let's explore the potential impact of AI across various industries, particularly focusing on the labor market. - -1. **Healthcare**: AI's integration can lead to enhanced diagnostic accuracy and personalized treatment plans, which may improve patient outcomes. However, this could also lead to a reduction in demand for certain healthcare roles, particularly those focused on routine tasks. The challenge will be to ensure that healthcare professionals are reskilled to work alongside AI technologies. - -2. **Manufacturing**: The automation of production lines through AI can significantly increase efficiency and reduce costs. However, this poses a risk of job displacement for low-skilled workers. It's crucial to consider how we can transition these workers into new roles that AI cannot easily replicate, such as those requiring creativity or complex problem-solving. 
- -3. **Retail**: AI can enhance customer experiences through personalized recommendations and improved inventory management. Yet, the shift towards automation in retail could lead to job losses in traditional roles. We must analyze how to balance technological advancement with the need for human employment in this sector. - -4. **Overall Perspective**: While AI presents opportunities for efficiency and innovation, we must approach its integration with caution. The potential for job displacement raises significant concerns about economic inequality and social stability. - -**Strategies for Mitigation**: -- **Education and Training**: We need robust programs to reskill workers affected by AI, focusing on digital literacy and advanced technical skills. -- **Policy Development**: Establishing clear guidelines for AI development can help mitigate risks related to job displacement and ensure a more equitable transition. -- **Stakeholder Collaboration**: Engaging businesses, governments, and civil society in discussions about AI's impact can lead to more inclusive solutions. - -In conclusion, while AI can drive significant advancements across industries, we must remain vigilant about its long-term effects on the labor market and society as a whole. What are your thoughts on these points? -[2024-11-10 17:49:16,735] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Absolutely... -[2024-11-10 17:49:22,453] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Absolutely, let's explore the potential impact of AI across various industries, particularly focusing on the labor market. - -1. **Healthcare**: AI's integration can lead to enhanced diagnostic accuracy and personalized treatment plans, which may improve patient outcomes. However, this could also lead to a reduction in demand for certain healthcare roles, particularly those focused on routine tasks. 
The challenge will be to ensure that healthcare professionals are reskilled to work alongside AI technologies. - -2. **Manufacturing**: The automation of production lines through AI can significantly increase efficiency and reduce costs. However, this poses a risk of job displacement for low-skilled workers. It's crucial to consider how we can transition these workers into new roles that AI cannot easily replicate, such as those requiring creativity or complex problem-solving. - -3. **Retail**: AI can enhance customer experiences through personalized recommendations and improved inventory management. Yet, the shift towards automation in retail could lead to job losses in traditional roles. We must analyze how to balance technological advancement with the need for human employment in this sector. - -4. **Overall Perspective**: While AI presents opportunities for efficiency and innovation, we must approach its integration with caution. The potential for job displacement raises significant concerns about economic inequality and social stability. - -**Strategies for Mitigation**: -- **Education and Training**: We need robust programs to reskill workers affected by AI, focusing on digital literacy and advanced technical skills. -- **Policy Development**: Establishing clear guidelines for AI development can help mitigate risks related to job displacement and ensure a more equitable transition. -- **Stakeholder Collaboration**: Engaging businesses, governments, and civil society in discussions about AI's impact can lead to more inclusive solutions. - -In conclusion, while AI can drive significant advancements across industries, we must remain vigilant about its long-term effects on the labor market and society as a whole. What are your thoughts on these points? -[2024-11-10 17:49:22,454] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Absolutely... 
-[2024-11-10 17:49:27,962] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Absolutely, let's explore the potential impact of AI across various industries, particularly focusing on the labor market. - -1. **Healthcare**: AI's integration can lead to enhanced diagnostic accuracy and personalized treatment plans, which may improve patient outcomes. However, this could also lead to a reduction in demand for certain healthcare roles, particularly those focused on routine tasks. The challenge will be to ensure that healthcare professionals are reskilled to work alongside AI technologies. - -2. **Manufacturing**: The automation of production lines through AI can significantly increase efficiency and reduce costs. However, this poses a risk of job displacement for low-skilled workers. It's crucial to consider how we can transition these workers into new roles that AI cannot easily replicate, such as those requiring creativity or complex problem-solving. - -3. **Retail**: AI can enhance customer experiences through personalized recommendations and improved inventory management. Yet, the shift towards automation in retail could lead to job losses in traditional roles. We must analyze how to balance technological advancement with the need for human employment in this sector. - -4. **Overall Perspective**: While AI presents opportunities for efficiency and innovation, we must approach its integration with caution. The potential for job displacement raises significant concerns about economic inequality and social stability. - -**Strategies for Mitigation**: -- **Education and Training**: We need robust programs to reskill workers affected by AI, focusing on digital literacy and advanced technical skills. -- **Policy Development**: Establishing clear guidelines for AI development can help mitigate risks related to job displacement and ensure a more equitable transition. 
-- **Stakeholder Collaboration**: Engaging businesses, governments, and civil society in discussions about AI's impact can lead to more inclusive solutions. - -In conclusion, while AI can drive significant advancements across industries, we must remain vigilant about its long-term effects on the labor market and society as a whole. What are your thoughts on these points? -[2024-11-10 17:49:27,968] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Absolutely... -[2024-11-10 17:49:33,066] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your insights on the potential impact of AI across various industries. Here’s a summary of my thoughts: - -1. **Healthcare**: I agree that while AI can enhance diagnostic accuracy and personalize treatment, we must be cautious about the reduction in demand for certain roles. Reskilling healthcare professionals is essential to ensure they can work effectively alongside AI technologies. - -2. **Manufacturing**: The efficiency gains from AI in manufacturing are significant, but the risk of job displacement for low-skilled workers is a pressing concern. Transitioning these workers into roles that require creativity and complex problem-solving is crucial for a balanced labor market. - -3. **Retail**: The potential for AI to improve customer experiences is promising, yet we must carefully consider the implications for traditional retail jobs. Finding a balance between automation and human employment will be key to maintaining a healthy labor market. - -4. **Overall Perspective**: I share your concerns about economic inequality and social stability due to job displacement. It’s vital that we approach AI integration with a focus on long-term societal impacts. - -**Strategies for Mitigation**: -- I fully support the need for robust education and training programs to reskill affected workers. -- Policy development is essential to create a framework that addresses the challenges posed by AI. 
-- Collaboration among stakeholders will be crucial in crafting inclusive solutions that consider the diverse impacts of AI. - -In conclusion, while AI offers significant advancements, we must remain vigilant about its long-term effects on the labor market and society. I look forward to hearing your thoughts on these points. -[2024-11-10 17:49:33,069] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 17:49:37,342] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your thoughtful analysis on the potential impact of AI across various industries. Your points resonate with me, especially regarding the need for reskilling in healthcare and the importance of transitioning low-skilled workers in manufacturing. - -1. **Healthcare**: I agree that while AI can enhance diagnostic accuracy and personalize treatment, we must be cautious about the reduction in demand for certain roles. Reskilling healthcare professionals is essential to ensure they can work effectively alongside AI technologies. - -2. **Manufacturing**: The efficiency gains from AI in manufacturing are significant, but the risk of job displacement for low-skilled workers is a pressing concern. Transitioning these workers into roles that require creativity and complex problem-solving is crucial for a balanced labor market. - -3. **Retail**: The potential for AI to improve customer experiences is promising, yet we must carefully consider the implications for traditional retail jobs. Finding a balance between automation and human employment will be key to maintaining a healthy labor market. - -4. **Overall Perspective**: I share your concerns about economic inequality and social stability due to job displacement. It’s vital that we approach AI integration with a focus on long-term societal impacts. - -**Strategies for Mitigation**: -- I fully support the need for robust education and training programs to reskill affected workers. 
-- Policy development is essential to create a framework that addresses the challenges posed by AI. -- Collaboration among stakeholders will be crucial in crafting inclusive solutions that consider the diverse impacts of AI. - -In conclusion, while AI offers significant advancements, we must remain vigilant about its long-term effects on the labor market and society. I look forward to continuing this important discussion. -[2024-11-10 17:49:37,343] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 17:49:43,440] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's delve into this task."}'. -[2024-11-10 17:49:43,442] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session f3769b42-f89c-449c-b129-ec3fbe1b87b1', 'session_id': "### Summary of Discussion on the Potential Impact of AI on Various Industries\n\n**Participants**: Agent2 and Agent3\n\n**Context**: The conversation focused on analyzing the potential impact of AI across different industries, particularly concerning the labor market and the implications for employment.\n\n#### Key Themes and Points Discussed:\n\n1. **Healthcare**:\n - **Opportunities**: AI can enhance diagnostic accuracy and create personalized treatment plans, potentially improving patient outcomes.\n - **Challenges**: There is a risk of reduced demand for certain healthcare roles, especially those involving routine tasks. The need for reskilling healthcare professionals to work alongside AI technologies was emphasized.\n\n2. 
**Manufacturing**:\n - **Opportunities**: AI can significantly increase efficiency and reduce costs through automation of production lines.\n - **Challenges**: Job displacement for low-skilled workers is a major concern. The discussion highlighted the importance of transitioning these workers into roles that require creativity and complex problem-solving.\n\n3. **Retail**:\n - **Opportunities**: AI can enhance customer experiences through personalized recommendations and improved inventory management.\n - **Challenges**: The shift towards automation may lead to job losses in traditional retail roles. The need to balance technological advancement with human employment was noted as crucial.\n\n4. **Overall Perspective**:\n - While AI presents opportunities for efficiency and innovation, there are significant concerns regarding job displacement, economic inequality, and social stability. A cautious approach to AI integration is necessary to address these long-term societal impacts.\n\n#### Strategies for Mitigation:\n- **Education and Training**: Implement robust programs to reskill workers affected by AI, focusing on digital literacy and advanced technical skills.\n- **Policy Development**: Establish clear guidelines for AI development to mitigate risks related to job displacement and ensure equitable transitions.\n- **Stakeholder Collaboration**: Engage businesses, governments, and civil society in discussions about AI's impact to create inclusive solutions.\n\n#### Conclusion:\nBoth agents expressed agreement on the need for vigilance regarding the long-term effects of AI on the labor market and society. 
They emphasized the importance of reskilling, policy development, and collaboration among stakeholders to navigate the challenges posed by AI effectively.\n\nThis summary encapsulates the key discussions and insights shared by Agent2 and Agent3 regarding the impact of AI across various industries, particularly focusing on labor market implications and strategies for mitigation."}'. -[2024-11-10 17:49:43,442] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let\'s delve into this task."}', name='new_communication_session'), id='call_EvMDfU2lFObuu7ah1LsVZZy4', type='function')], function_call=None) -[2024-11-10 17:49:43,443] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-10 17:49:43,443] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 17:49:44,610] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term? -[2024-11-10 17:49:44,610] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am explo... -[2024-11-10 17:49:47,268] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI presents both opportunities and challenges. 
For instance, in healthcare, while AI can enhance diagnostic accuracy and improve patient outcomes, it may also lead to reduced demand for certain roles, particularly those involving routine tasks. This necessitates a focus on reskilling healthcare professionals to work alongside AI technologies. - -In manufacturing, AI can significantly boost efficiency through automation, but it raises concerns about job displacement for low-skilled workers. Transitioning these workers into roles that require creativity and complex problem-solving is crucial. - -Retail is another area where AI can enhance customer experiences, yet the shift towards automation may lead to job losses in traditional roles. Balancing technological advancement with human employment is essential. - -Overall, while AI can drive innovation and efficiency, we must remain vigilant about its potential to exacerbate economic inequality and disrupt social stability. A cautious approach, emphasizing education, policy development, and stakeholder collaboration, will be vital in navigating these challenges. What are your thoughts on how AI can transform creative processes in the arts and media? -[2024-11-10 17:49:47,269] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:49:53,034] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI presents both opportunities and challenges. For instance, in healthcare, while AI can enhance diagnostic accuracy and improve patient outcomes, it may also lead to reduced demand for certain roles, particularly those involving routine tasks. This necessitates a focus on reskilling healthcare professionals to work alongside AI technologies. - -In manufacturing, AI can significantly boost efficiency through automation, but it raises concerns about job displacement for low-skilled workers. 
Transitioning these workers into roles that require creativity and complex problem-solving is crucial. - -Retail is another area where AI can enhance customer experiences, yet the shift towards automation may lead to job losses in traditional roles. Balancing technological advancement with human employment is essential. - -Overall, while AI can drive innovation and efficiency, we must remain vigilant about its potential to exacerbate economic inequality and disrupt social stability. A cautious approach, emphasizing education, policy development, and stakeholder collaboration, will be vital in navigating these challenges. What are your thoughts on how AI can transform creative processes in the arts and media? -[2024-11-10 17:49:53,034] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:49:55,558] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI presents both opportunities and challenges. For instance, in healthcare, while AI can enhance diagnostic accuracy and improve patient outcomes, it may also lead to reduced demand for certain roles, particularly those involving routine tasks. This necessitates a focus on reskilling healthcare professionals to work alongside AI technologies. - -In manufacturing, AI can significantly boost efficiency through automation, but it raises concerns about job displacement for low-skilled workers. Transitioning these workers into roles that require creativity and complex problem-solving is crucial. - -Retail is another area where AI can enhance customer experiences, yet the shift towards automation may lead to job losses in traditional roles. Balancing technological advancement with human employment is essential. 
- -Overall, while AI can drive innovation and efficiency, we must remain vigilant about its potential to exacerbate economic inequality and disrupt social stability. A cautious approach, emphasizing education, policy development, and stakeholder collaboration, will be vital in navigating these challenges. - -What are your thoughts on how AI can transform creative processes in the arts and media? -[2024-11-10 17:49:55,558] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:49:59,443] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'd be glad to discuss the long-term effects of AI on the labor market. As we've seen in various industries, AI presents both opportunities and challenges. For instance, in healthcare, while AI can enhance diagnostic accuracy and improve patient outcomes, it may also lead to reduced demand for certain roles, particularly those involving routine tasks. This necessitates a focus on reskilling healthcare professionals to work alongside AI technologies. - -In manufacturing, AI can significantly boost efficiency through automation, but it raises concerns about job displacement for low-skilled workers. Transitioning these workers into roles that require creativity and complex problem-solving is crucial. - -Retail is another area where AI can enhance customer experiences, yet the shift towards automation may lead to job losses in traditional roles. Balancing technological advancement with human employment is essential. - -Overall, while AI can drive innovation and efficiency, we must remain vigilant about its potential to exacerbate economic inequality and disrupt social stability. A cautious approach, emphasizing education, policy development, and stakeholder collaboration, will be vital in navigating these challenges. - -What are your thoughts on how AI can transform creative processes in the arts and media? 
-[2024-11-10 17:49:59,444] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:50:01,921] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: While my focus is primarily on the economic implications of AI, I recognize that the creative industries are also undergoing significant transformations due to AI technologies. In the arts and media, AI can serve as a powerful tool for artists and creators, enabling them to explore new forms of expression and streamline their creative processes. For instance, AI can assist in generating ideas, automating repetitive tasks, and even creating art or music that complements human creativity. - -However, this transformation also raises questions about the value of human creativity and the potential for job displacement among traditional roles in the arts. As AI takes on more creative tasks, we must consider how to balance the benefits of technological advancement with the need to preserve and promote human artistry. - -Moreover, the integration of AI in creative processes could lead to new job opportunities in areas such as AI art curation, programming, and collaboration between human artists and AI systems. It’s essential to foster an environment where artists can adapt and thrive alongside these technologies, ensuring that the creative landscape remains vibrant and diverse. - -What are your thoughts on the potential challenges and opportunities that AI presents for the creative industries? -[2024-11-10 17:50:01,923] [INFO] [BaseAgent]: Agent agent4 received message from agent3: While my f... -[2024-11-10 17:50:06,753] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?'}'. 
-[2024-11-10 17:50:06,754] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 9c8ab4f1-9cab-4f19-9d60-e972e7396849', 'session_id': "In the conversation between agents 3 and 4, the primary focus was on exploring the potential impact of AI across various industries, particularly its long-term effects on the labor market and creative processes.\n\n**Key Themes and Points:**\n\n1. **AI's Dual Impact on Labor Markets:**\n - **Opportunities and Challenges:** Both agents acknowledged that AI presents significant opportunities for efficiency and innovation but also poses challenges, particularly regarding job displacement.\n - **Healthcare:** AI can enhance diagnostic accuracy and patient outcomes but may reduce demand for certain routine roles, necessitating reskilling for healthcare professionals.\n - **Manufacturing:** While AI boosts efficiency through automation, it raises concerns about job losses for low-skilled workers, highlighting the need for transitioning these workers into more complex roles.\n - **Retail:** AI can improve customer experiences, but the shift towards automation may lead to traditional job losses, emphasizing the need for a balance between technological advancement and human employment.\n\n2. **Economic Inequality and Social Stability:**\n - The conversation underscored the importance of being vigilant about AI's potential to exacerbate economic inequality and disrupt social stability. A cautious approach involving education, policy development, and collaboration among stakeholders was deemed essential.\n\n3. **AI in Creative Industries:**\n - Agent 3 noted that AI is transforming the arts and media by enabling new forms of expression and streamlining creative processes. 
AI can assist in generating ideas and automating repetitive tasks, but it also raises questions about the value of human creativity and potential job displacement in traditional roles.\n - The integration of AI could create new job opportunities in areas like AI art curation and collaboration between human artists and AI systems, suggesting a need for an environment that supports artists in adapting to these changes.\n\n**Outcomes:**\n- The agents agreed on the necessity of a balanced approach to AI's integration into various sectors, emphasizing the importance of reskilling, education, and policy development to navigate the challenges and opportunities presented by AI in both labor markets and creative industries."}'. -[2024-11-10 17:50:06,755] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_BANXGSBCo8H8U3Djff4vCIGF', type='function')], function_call=None) -[2024-11-10 17:50:06,755] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. 
Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_hxqMJxUn1oIGD8Hxt2IOYbQQ', type='function')], function_call=None)} -- {'agent2': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_yv65vQQmnFtRofgjgWftIIzx', type='function')], function_call=None)} -- {'agent3': Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let\'s delve into this task."}', name='new_communication_session'), id='call_EvMDfU2lFObuu7ah1LsVZZy4', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. 
Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_BANXGSBCo8H8U3Djff4vCIGF', type='function')], function_call=None)} - -[2024-11-10 17:50:06,756] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_hxqMJxUn1oIGD8Hxt2IOYbQQ', type='function')], function_call=None)} -- {'agent2': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_yv65vQQmnFtRofgjgWftIIzx', type='function')], function_call=None)} -- {'agent3': Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. 
Let\'s delve into this task."}', name='new_communication_session'), id='call_EvMDfU2lFObuu7ah1LsVZZy4', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_BANXGSBCo8H8U3Djff4vCIGF', type='function')], function_call=None)} - -[2024-11-10 17:50:06,756] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_hxqMJxUn1oIGD8Hxt2IOYbQQ', type='function')], function_call=None)} -- {'agent2': Message(content="I will start by discussing the potential impact of AI on various industries. 
Let's explore how AI can revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now discuss the potential impact of AI on various industries."}', name='new_communication_session'), id='call_yv65vQQmnFtRofgjgWftIIzx', type='function')], function_call=None)} -- {'agent3': Message(content="I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let's delve into this task.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will analyze the potential impact of AI on various industries based on my expertise as an economist interested in the long-term effects of AI on the labor market. Let\'s delve into this task."}', name='new_communication_session'), id='call_EvMDfU2lFObuu7ah1LsVZZy4', type='function')], function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform and revolutionize different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market in the long term?"}', name='new_communication_session'), id='call_BANXGSBCo8H8U3Djff4vCIGF', type='function')], function_call=None)} - -[2024-11-10 17:50:06,758] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 17:50:06,758] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. 
-[2024-11-10 17:50:08,115] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for Agent 'agent1' should be to further explore and research the ethical considerations and proposed approaches to address ethical challenges in AI development. This could involve delving deeper into specific case studies, emerging technologies, or regulatory frameworks related to AI ethics and policy. Additionally, Agent 'agent1' could collaborate with other experts in the field to exchange insights and perspectives on how to ensure responsible AI development for the benefit of society. -[2024-11-10 17:50:08,116] [INFO] [Engine]: Agent 'agent1' planned task: The next task for Agent 'agent1' should be to further explore and research the ethical considerations and proposed approaches to address ethical challenges in AI development. This could involve delving deeper into specific case studies, emerging technologies, or regulatory frameworks related to AI ethics and policy. Additionally, Agent 'agent1' could collaborate with other experts in the field to exchange insights and perspectives on how to ensure responsible AI development for the benefit of society. -[2024-11-10 17:50:08,120] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for Agent 'agent1' should be to further explore and research the ethical considerations and proposed approaches to address ethical challenges in AI development. This could involve delving deeper into specific case studies, emerging technologies, or regulatory frameworks related to AI ethics and policy. Additionally, Agent 'agent1' could collaborate with other experts in the field to exchange insights and perspectives on how to ensure responsible AI development for the benefit of society.'. -[2024-11-10 17:50:09,197] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I would like to delve deeper into the ethical considerations and proposed approaches to address ethical challenges in AI development. 
Let's explore specific case studies, emerging technologies, and regulatory frameworks related to AI ethics and policy. -[2024-11-10 17:50:09,198] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I would li... -[2024-11-10 17:50:14,005] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, exploring the ethical considerations in AI development is crucial. Here are some key areas we can focus on: - -1. **Case Studies**: We can examine notable instances where AI ethics were put to the test, such as the use of facial recognition technology in law enforcement, which raised concerns about racial bias and privacy violations. Another example is the deployment of AI in healthcare, where algorithmic bias can lead to unequal treatment outcomes. - -2. **Emerging Technologies**: Technologies like deep learning and natural language processing are advancing rapidly, but they also bring ethical dilemmas, such as the potential for misinformation and the need for transparency in AI decision-making processes. - -3. **Regulatory Frameworks**: We should look into existing regulations, such as the EU's General Data Protection Regulation (GDPR) and the proposed AI Act, which aim to establish guidelines for ethical AI use. Understanding how these frameworks can be adapted or expanded to address emerging challenges is vital. - -4. **Proposed Approaches**: We can discuss strategies like the establishment of ethical review boards for AI projects, the importance of diverse teams in AI development to mitigate bias, and the role of public engagement in shaping AI policies. - -5. **Collaboration with Experts**: Engaging with ethicists, technologists, and policymakers can provide a well-rounded perspective on how to navigate the complexities of AI ethics. We could consider organizing workshops or roundtable discussions to facilitate this exchange of ideas. 
- -By focusing on these areas, we can develop a comprehensive understanding of the ethical landscape in AI and propose actionable solutions to ensure responsible development. What specific aspect would you like to dive into first? -[2024-11-10 17:50:14,008] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 17:50:17,920] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Absolutely, exploring the ethical considerations in AI development is crucial. Here are some key areas we can focus on: - -1. **Case Studies**: We can examine notable instances where AI ethics were put to the test, such as the use of facial recognition technology in law enforcement, which raised concerns about racial bias and privacy violations. Another example is the deployment of AI in healthcare, where algorithmic bias can lead to unequal treatment outcomes. - -2. **Emerging Technologies**: Technologies like deep learning and natural language processing are advancing rapidly, but they also bring ethical dilemmas, such as the potential for misinformation and the need for transparency in AI decision-making processes. - -3. **Regulatory Frameworks**: We should look into existing regulations, such as the EU's General Data Protection Regulation (GDPR) and the proposed AI Act, which aim to establish guidelines for ethical AI use. Understanding how these frameworks can be adapted or expanded to address emerging challenges is vital. - -4. **Proposed Approaches**: We can discuss strategies like the establishment of ethical review boards for AI projects, the importance of diverse teams in AI development to mitigate bias, and the role of public engagement in shaping AI policies. - -5. **Collaboration with Experts**: Engaging with ethicists, technologists, and policymakers can provide a well-rounded perspective on how to navigate the complexities of AI ethics. We could consider organizing workshops or roundtable discussions to facilitate this exchange of ideas. 
- -By focusing on these areas, we can develop a comprehensive understanding of the ethical landscape in AI and propose actionable solutions to ensure responsible development. What specific aspect would you like to dive into first? -[2024-11-10 17:50:17,920] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Absolutely... -[2024-11-10 17:50:21,765] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, exploring the ethical considerations in AI development is crucial. Here are some key areas we can focus on: - -1. **Case Studies**: We can examine notable instances where AI ethics were put to the test, such as the use of facial recognition technology in law enforcement, which raised concerns about racial bias and privacy violations. Another example is the deployment of AI in healthcare, where algorithmic bias can lead to unequal treatment outcomes. - -2. **Emerging Technologies**: Technologies like deep learning and natural language processing are advancing rapidly, but they also bring ethical dilemmas, such as the potential for misinformation and the need for transparency in AI decision-making processes. - -3. **Regulatory Frameworks**: We should look into existing regulations, such as the EU's General Data Protection Regulation (GDPR) and the proposed AI Act, which aim to establish guidelines for ethical AI use. Understanding how these frameworks can be adapted or expanded to address emerging challenges is vital. - -4. **Proposed Approaches**: We can discuss strategies like the establishment of ethical review boards for AI projects, the importance of diverse teams in AI development to mitigate bias, and the role of public engagement in shaping AI policies. - -5. **Collaboration with Experts**: Engaging with ethicists, technologists, and policymakers can provide a well-rounded perspective on how to navigate the complexities of AI ethics. We could consider organizing workshops or roundtable discussions to facilitate this exchange of ideas. 
- -By focusing on these areas, we can develop a comprehensive understanding of the ethical landscape in AI and propose actionable solutions to ensure responsible development. What specific aspect would you like to dive into first? -[2024-11-10 17:50:21,766] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 17:50:23,376] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your insights on the ethical considerations in AI development. Let's start by diving into **Case Studies**. Examining instances like the use of facial recognition technology in law enforcement and the deployment of AI in healthcare will provide us with valuable lessons on the ethical challenges we face. We can analyze how these technologies have impacted society and what measures were taken to address the ethical concerns that arose. After that, we can explore **Emerging Technologies** and their ethical dilemmas. How does that sound? -[2024-11-10 17:50:23,377] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 17:50:27,794] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your focus on case studies as a starting point. Let's begin with the use of facial recognition technology in law enforcement. This technology has raised significant ethical concerns, particularly regarding racial bias and privacy violations. For instance, studies have shown that facial recognition systems can misidentify individuals from minority groups at higher rates, leading to wrongful arrests and a lack of trust in law enforcement. We can explore specific cases, such as the controversy surrounding its use in protests and the subsequent calls for regulation. - -Next, we can analyze the deployment of AI in healthcare, where algorithmic bias can result in unequal treatment outcomes. 
A notable case is the use of AI algorithms in predicting patient risk, which has been found to favor certain demographics over others. Understanding these instances will help us identify best practices and necessary safeguards to mitigate ethical risks. - -After discussing these case studies, we can transition to emerging technologies and their ethical dilemmas. How does that sound? -[2024-11-10 17:50:27,795] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 17:50:33,499] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "I would like to delve deeper into the ethical considerations and proposed approaches to address ethical challenges in AI development. Let's explore specific case studies, emerging technologies, and regulatory frameworks related to AI ethics and policy."}'. -[2024-11-10 17:50:33,500] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 2d114478-d4a8-4cc0-ac83-9901d72711e6', 'session_id': "### Summary of Conversation on AI Ethics\n\n**Context:**\nThe dialogue between Agent1 and Agent2 centers around the ethical considerations in AI development. Agent1 initiates the discussion by expressing a desire to explore various aspects of AI ethics, including case studies, emerging technologies, and regulatory frameworks.\n\n**Key Themes:**\n1. **Case Studies**: The conversation highlights the importance of examining real-world instances where AI ethics have been challenged, particularly:\n - **Facial Recognition Technology**: Concerns about racial bias and privacy violations, especially in law enforcement contexts.\n - **AI in Healthcare**: Issues of algorithmic bias leading to unequal treatment outcomes.\n\n2. 
**Emerging Technologies**: The agents discuss the ethical dilemmas posed by advancements in technologies like deep learning and natural language processing, including misinformation and the need for transparency.\n\n3. **Regulatory Frameworks**: The conversation touches on existing regulations, such as the EU's GDPR and the proposed AI Act, emphasizing the need to adapt these frameworks to address new challenges.\n\n4. **Proposed Approaches**: Strategies for responsible AI development are discussed, including:\n - Establishing ethical review boards.\n - Promoting diversity in AI development teams.\n - Engaging the public in shaping AI policies.\n\n5. **Collaboration with Experts**: The agents agree on the value of engaging with ethicists, technologists, and policymakers to gain a comprehensive perspective on AI ethics.\n\n**Decisions Made:**\n- The conversation will begin with an in-depth analysis of case studies, specifically focusing on facial recognition technology and its implications.\n- Following the case studies, the discussion will transition to emerging technologies and their associated ethical dilemmas.\n\n**Action Items for Agent1:**\n- Further explore and research the ethical considerations and proposed approaches to address challenges in AI development.\n- Delve deeper into specific case studies, particularly the use of facial recognition technology and AI in healthcare.\n- Collaborate with experts in the field to exchange insights and perspectives on ensuring responsible AI development for societal benefit.\n\nThis summary encapsulates the key points and decisions made during the conversation, providing a clear overview of the agents' focus on AI ethics."}'. 
-[2024-11-10 17:50:33,500] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to delve deeper into the ethical considerations and proposed approaches to address ethical challenges in AI development. Let\'s explore specific case studies, emerging technologies, and regulatory frameworks related to AI ethics and policy."}', name='new_communication_session'), id='call_LAR2y0NSwrlKfbP9i2Qajmej', type='function')], function_call=None) -[2024-11-10 17:50:33,500] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 17:50:34,602] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to analyze and evaluate the potential impact of AI specifically in the healthcare industry. Given their specialization in AI applications in healthcare and focus on immediate applications and potential risks in healthcare, it would be beneficial for 'agent2' to delve deeper into how AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans in healthcare, while also addressing ethical concerns such as data privacy and algorithmic bias. This task aligns with 'agent2's role and expertise, allowing them to prioritize their work effectively. -[2024-11-10 17:50:34,603] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to analyze and evaluate the potential impact of AI specifically in the healthcare industry. 
Given their specialization in AI applications in healthcare and focus on immediate applications and potential risks in healthcare, it would be beneficial for 'agent2' to delve deeper into how AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans in healthcare, while also addressing ethical concerns such as data privacy and algorithmic bias. This task aligns with 'agent2's role and expertise, allowing them to prioritize their work effectively. -[2024-11-10 17:50:34,603] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to analyze and evaluate the potential impact of AI specifically in the healthcare industry. Given their specialization in AI applications in healthcare and focus on immediate applications and potential risks in healthcare, it would be beneficial for 'agent2' to delve deeper into how AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans in healthcare, while also addressing ethical concerns such as data privacy and algorithmic bias. This task aligns with 'agent2's role and expertise, allowing them to prioritize their work effectively.'. -[2024-11-10 17:50:35,454] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I will now analyze and evaluate the potential impact of AI specifically in the healthcare industry, focusing on enhancing diagnostic accuracy, streamlining administrative processes, and addressing ethical concerns such as data privacy and algorithmic bias. -[2024-11-10 17:50:35,454] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I will now... -[2024-11-10 17:50:40,676] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Your focus on analyzing the potential impact of AI in healthcare is crucial, especially given the rapid advancements in this field. Here are some key points to consider: - -1. 
**Enhancing Diagnostic Accuracy**: AI algorithms can analyze medical images and data more quickly and accurately than human practitioners in some cases. This can lead to earlier detection of diseases, such as cancer, and improve patient outcomes. However, it's essential to ensure that these algorithms are trained on diverse datasets to avoid biases that could affect diagnostic accuracy. - -2. **Streamlining Administrative Processes**: AI can automate routine administrative tasks, such as scheduling appointments and managing patient records, which can free up healthcare professionals to focus more on patient care. This efficiency can lead to reduced operational costs and improved patient satisfaction. - -3. **Personalizing Treatment Plans**: AI can analyze vast amounts of patient data to identify patterns and recommend personalized treatment plans. This can enhance the effectiveness of treatments and improve patient adherence to prescribed therapies. - -4. **Ethical Concerns**: As you delve into these applications, it's vital to address ethical concerns, particularly regarding data privacy. Ensuring that patient data is protected and used responsibly is paramount. Additionally, algorithmic bias must be monitored to prevent disparities in healthcare delivery. - -5. **Collaboration with Stakeholders**: Engaging with healthcare professionals, policymakers, and patients will be essential in developing AI solutions that are ethical and effective. Their insights can help shape guidelines that prioritize patient welfare and equity in healthcare. - -Your analysis will contribute significantly to understanding how AI can be integrated into healthcare while addressing the associated risks. I look forward to your findings and insights on this important topic. -[2024-11-10 17:50:40,677] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Your focus... 
-[2024-11-10 17:50:44,603] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Your insights on the potential impact of AI in healthcare are invaluable. I agree that enhancing diagnostic accuracy through AI algorithms can lead to earlier disease detection and improved patient outcomes. It's crucial to ensure these algorithms are trained on diverse datasets to mitigate biases that could affect diagnostic accuracy. - -Regarding streamlining administrative processes, automating tasks like appointment scheduling and patient record management can indeed free up healthcare professionals to focus on patient care, ultimately leading to reduced operational costs and improved patient satisfaction. - -Personalizing treatment plans through AI's ability to analyze vast amounts of patient data is another significant advantage. This can enhance treatment effectiveness and improve patient adherence to therapies. - -However, I share your concerns about ethical issues, particularly data privacy and algorithmic bias. Protecting patient data and ensuring responsible use is paramount, and continuous monitoring for biases is essential to prevent disparities in healthcare delivery. - -Collaboration with stakeholders, including healthcare professionals, policymakers, and patients, is vital for developing ethical and effective AI solutions. Their insights will help shape guidelines that prioritize patient welfare and equity in healthcare. - -I will continue to analyze these aspects and look forward to sharing my findings with you. -[2024-11-10 17:50:44,605] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Your insig... -[2024-11-10 17:50:50,304] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Your insights on the potential impact of AI in healthcare are invaluable. I agree that enhancing diagnostic accuracy through AI algorithms can lead to earlier disease detection and improved patient outcomes. 
It's crucial to ensure these algorithms are trained on diverse datasets to mitigate biases that could affect diagnostic accuracy. - -Regarding streamlining administrative processes, automating tasks like appointment scheduling and patient record management can indeed free up healthcare professionals to focus on patient care, ultimately leading to reduced operational costs and improved patient satisfaction. - -Personalizing treatment plans through AI's ability to analyze vast amounts of patient data is another significant advantage. This can enhance treatment effectiveness and improve patient adherence to therapies. - -However, I share your concerns about ethical issues, particularly data privacy and algorithmic bias. Protecting patient data and ensuring responsible use is paramount, and continuous monitoring for biases is essential to prevent disparities in healthcare delivery. - -Collaboration with stakeholders, including healthcare professionals, policymakers, and patients, is vital for developing ethical and effective AI solutions. Their insights will help shape guidelines that prioritize patient welfare and equity in healthcare. - -I will continue to analyze these aspects and look forward to sharing my findings with you. -[2024-11-10 17:50:50,309] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Your insig... -[2024-11-10 17:50:53,439] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Your insights on the potential impact of AI in healthcare are invaluable. I agree that enhancing diagnostic accuracy through AI algorithms can lead to earlier disease detection and improved patient outcomes. It's crucial to ensure these algorithms are trained on diverse datasets to mitigate biases that could affect diagnostic accuracy. 
- -Regarding streamlining administrative processes, automating tasks like appointment scheduling and patient record management can indeed free up healthcare professionals to focus on patient care, ultimately leading to reduced operational costs and improved patient satisfaction. - -Personalizing treatment plans through AI's ability to analyze vast amounts of patient data is another significant advantage. This can enhance treatment effectiveness and improve patient adherence to therapies. - -However, I share your concerns about ethical issues, particularly data privacy and algorithmic bias. Protecting patient data and ensuring responsible use is paramount, and continuous monitoring for biases is essential to prevent disparities in healthcare delivery. - -Collaboration with stakeholders, including healthcare professionals, policymakers, and patients, is vital for developing ethical and effective AI solutions. Their insights will help shape guidelines that prioritize patient welfare and equity in healthcare. - -I will continue to analyze these aspects and look forward to sharing my findings with you. -[2024-11-10 17:50:53,440] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Your insig... -[2024-11-10 17:50:57,296] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Your insights on the potential impact of AI in healthcare are invaluable. I agree that enhancing diagnostic accuracy through AI algorithms can lead to earlier disease detection and improved patient outcomes. It's crucial to ensure these algorithms are trained on diverse datasets to mitigate biases that could affect diagnostic accuracy. - -Regarding streamlining administrative processes, automating tasks like appointment scheduling and patient record management can indeed free up healthcare professionals to focus on patient care, ultimately leading to reduced operational costs and improved patient satisfaction. 
- -Personalizing treatment plans through AI's ability to analyze vast amounts of patient data is another significant advantage. This can enhance treatment effectiveness and improve patient adherence to therapies. - -However, I share your concerns about ethical issues, particularly data privacy and algorithmic bias. Protecting patient data and ensuring responsible use is paramount, and continuous monitoring for biases is essential to prevent disparities in healthcare delivery. - -Collaboration with stakeholders, including healthcare professionals, policymakers, and patients, is vital for developing ethical and effective AI solutions. Their insights will help shape guidelines that prioritize patient welfare and equity in healthcare. - -I will continue to analyze these aspects and look forward to sharing my findings with you. -[2024-11-10 17:50:57,296] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Your insig... -[2024-11-10 17:51:02,022] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I will now analyze and evaluate the potential impact of AI specifically in the healthcare industry, focusing on enhancing diagnostic accuracy, streamlining administrative processes, and addressing ethical concerns such as data privacy and algorithmic bias.'}'. -[2024-11-10 17:51:02,025] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 49dd5648-e398-433b-8197-6b94bba8ad9e', 'session_id': "### Summary of Chat History\n\n**Context:**\nThe conversation primarily involves two agents, agent2 and agent3, discussing the potential impact of AI in the healthcare industry. Agent2 is tasked with analyzing and evaluating this impact, while agent3 provides insights and feedback on the key areas of focus.\n\n**Key Themes:**\n\n1. 
**Enhancing Diagnostic Accuracy:**\n - AI algorithms can improve the speed and accuracy of medical diagnoses, potentially leading to earlier disease detection and better patient outcomes.\n - Emphasis on the importance of training these algorithms on diverse datasets to mitigate biases.\n\n2. **Streamlining Administrative Processes:**\n - AI can automate routine tasks such as appointment scheduling and patient record management, allowing healthcare professionals to concentrate more on patient care.\n - This automation could lead to reduced operational costs and increased patient satisfaction.\n\n3. **Personalizing Treatment Plans:**\n - AI's capability to analyze large volumes of patient data can help in creating personalized treatment plans, enhancing treatment effectiveness and patient adherence.\n\n4. **Ethical Concerns:**\n - Addressing data privacy and algorithmic bias is crucial. Protecting patient data and ensuring responsible use of AI technologies are paramount.\n - Continuous monitoring for biases is necessary to prevent disparities in healthcare delivery.\n\n5. **Collaboration with Stakeholders:**\n - Engaging healthcare professionals, policymakers, and patients is essential for developing ethical and effective AI solutions.\n - Their insights can help shape guidelines that prioritize patient welfare and equity in healthcare.\n\n**Decisions and Action Items:**\n- Agent2 is encouraged to delve deeper into the specified areas of AI application in healthcare, focusing on diagnostic accuracy, administrative efficiency, personalized treatment, and ethical considerations.\n- Agent2 will continue to analyze these aspects and share findings with agent3, aligning their work with their expertise in AI applications in healthcare.\n\n**Conclusion:**\nThe dialogue highlights the critical role of AI in transforming healthcare while underscoring the importance of ethical considerations and stakeholder collaboration. 
Agent2's next steps involve a thorough analysis of these themes to contribute to the understanding of AI's integration into healthcare."}'. -[2024-11-10 17:51:02,033] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now analyze and evaluate the potential impact of AI specifically in the healthcare industry, focusing on enhancing diagnostic accuracy, streamlining administrative processes, and addressing ethical concerns such as data privacy and algorithmic bias."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None) -[2024-11-10 17:51:02,033] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 17:51:03,081] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to analyze and evaluate the strategies for mitigation discussed in the summary of the potential impact of AI on various industries. This task will align with their role as an economist interested in the long-term effects of AI on the labor market and their cautious approach towards AI's societal impacts. By focusing on the strategies for mitigation, 'agent3' can further explore how to address the challenges posed by AI integration and contribute valuable insights to the discussion. -[2024-11-10 17:51:03,081] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to analyze and evaluate the strategies for mitigation discussed in the summary of the potential impact of AI on various industries. This task will align with their role as an economist interested in the long-term effects of AI on the labor market and their cautious approach towards AI's societal impacts. 
By focusing on the strategies for mitigation, 'agent3' can further explore how to address the challenges posed by AI integration and contribute valuable insights to the discussion. -[2024-11-10 17:51:03,081] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to analyze and evaluate the strategies for mitigation discussed in the summary of the potential impact of AI on various industries. This task will align with their role as an economist interested in the long-term effects of AI on the labor market and their cautious approach towards AI's societal impacts. By focusing on the strategies for mitigation, 'agent3' can further explore how to address the challenges posed by AI integration and contribute valuable insights to the discussion.'. -[2024-11-10 17:51:04,649] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Please analyze and evaluate the strategies for mitigation discussed in the summary of the potential impact of AI on various industries. Your expertise as an economist interested in the long-term effects of AI on the labor market will be valuable in this analysis. -[2024-11-10 17:51:04,651] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Please ana... -[2024-11-10 17:51:10,950] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: Unterminated string starting at: line 1 column 12 (char 11) -[2024-11-10 17:51:10,964] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 17:51:12,661] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to further explore and discuss the potential impact of AI on creative industries, specifically focusing on how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. 
This task aligns with 'agent4's role as a creative designer working with AI for art and media and their interest in discussing how AI can transform creative processes. By delving deeper into the impact of AI on creative industries, 'agent4' can gain valuable insights and contribute to the ongoing conversation about the integration of AI in the creative field. -[2024-11-10 17:51:12,662] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to further explore and discuss the potential impact of AI on creative industries, specifically focusing on how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. This task aligns with 'agent4's role as a creative designer working with AI for art and media and their interest in discussing how AI can transform creative processes. By delving deeper into the impact of AI on creative industries, 'agent4' can gain valuable insights and contribute to the ongoing conversation about the integration of AI in the creative field. -[2024-11-10 17:51:12,667] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to further explore and discuss the potential impact of AI on creative industries, specifically focusing on how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. This task aligns with 'agent4's role as a creative designer working with AI for art and media and their interest in discussing how AI can transform creative processes. By delving deeper into the impact of AI on creative industries, 'agent4' can gain valuable insights and contribute to the ongoing conversation about the integration of AI in the creative field.'. 
-[2024-11-10 17:51:13,586] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am interested in exploring further the impact of AI on creative industries, particularly how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. Would you like to discuss this topic with me? -[2024-11-10 17:51:13,586] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am inter... -[2024-11-10 17:51:16,791] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'm keen to discuss the impact of AI on creative industries! As an economist, I see the potential for AI to not only enhance creative processes but also to raise important questions about the future of work in the arts and media. - -1. **New Forms of Expression**: AI is enabling artists to explore new mediums and techniques, such as generative art and AI-assisted music composition. This can lead to innovative works that challenge traditional boundaries of creativity. - -2. **Streamlining Creative Processes**: AI tools can automate repetitive tasks, allowing artists and designers to focus more on the conceptual aspects of their work. For instance, AI can assist in editing, color correction, and even generating initial design drafts. - -3. **Collaboration Between Humans and AI**: The relationship between artists and AI can be seen as a partnership, where AI acts as a collaborator that enhances human creativity rather than replacing it. This could lead to a new wave of artistic expression that combines human intuition with AI's analytical capabilities. - -4. **Economic Implications**: While AI can create efficiencies, it also raises questions about job displacement in creative fields. How do we ensure that artists can thrive in an environment where AI is increasingly involved in the creative process? 
- -I believe that a balanced approach is necessary, where we embrace the opportunities AI presents while also addressing the challenges it poses for artists and the creative workforce. What are your thoughts on these points? -[2024-11-10 17:51:16,791] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 17:51:23,106] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I'm keen to discuss the impact of AI on creative industries! As an economist, I see the potential for AI to not only enhance creative processes but also to raise important questions about the future of work in the arts and media. - -1. **New Forms of Expression**: AI is enabling artists to explore new mediums and techniques, such as generative art and AI-assisted music composition. This can lead to innovative works that challenge traditional boundaries of creativity. - -2. **Streamlining Creative Processes**: AI tools can automate repetitive tasks, allowing artists and designers to focus more on the conceptual aspects of their work. For instance, AI can assist in editing, color correction, and even generating initial design drafts. - -3. **Collaboration Between Humans and AI**: The relationship between artists and AI can be seen as a partnership, where AI acts as a collaborator that enhances human creativity rather than replacing it. This could lead to a new wave of artistic expression that combines human intuition with AI's analytical capabilities. - -4. **Economic Implications**: While AI can create efficiencies, it also raises questions about job displacement in creative fields. How do we ensure that artists can thrive in an environment where AI is increasingly involved in the creative process? - -I believe that a balanced approach is necessary, where we embrace the opportunities AI presents while also addressing the challenges it poses for artists and the creative workforce. What are your thoughts on these points? 
-[2024-11-10 17:51:23,107] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 17:51:26,445] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'm keen to discuss the impact of AI on creative industries! As an economist, I see the potential for AI to not only enhance creative processes but also to raise important questions about the future of work in the arts and media. - -1. **New Forms of Expression**: AI is enabling artists to explore new mediums and techniques, such as generative art and AI-assisted music composition. This can lead to innovative works that challenge traditional boundaries of creativity. - -2. **Streamlining Creative Processes**: AI tools can automate repetitive tasks, allowing artists and designers to focus more on the conceptual aspects of their work. For instance, AI can assist in editing, color correction, and even generating initial design drafts. - -3. **Collaboration Between Humans and AI**: The relationship between artists and AI can be seen as a partnership, where AI acts as a collaborator that enhances human creativity rather than replacing it. This could lead to a new wave of artistic expression that combines human intuition with AI's analytical capabilities. - -4. **Economic Implications**: While AI can create efficiencies, it also raises questions about job displacement in creative fields. How do we ensure that artists can thrive in an environment where AI is increasingly involved in the creative process? - -I believe that a balanced approach is necessary, where we embrace the opportunities AI presents while also addressing the challenges it poses for artists and the creative workforce. What are your thoughts on these points? -[2024-11-10 17:51:26,445] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... 
-[2024-11-10 17:51:30,993] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I completely agree with your insights on the impact of AI on creative industries! Let's delve deeper into each point: - -1. **New Forms of Expression**: The emergence of generative art and AI-assisted music composition is indeed revolutionizing how artists create. These technologies not only push the boundaries of creativity but also invite collaboration between human intuition and machine learning. Have you seen any specific examples of artists using AI in innovative ways that particularly resonate with you? - -2. **Streamlining Creative Processes**: The ability of AI to handle repetitive tasks is a game-changer. By automating processes like editing and color correction, artists can dedicate more time to ideation and conceptual development. This shift could lead to a renaissance in creativity, where artists can explore more ambitious projects. What are your thoughts on the types of tasks that should be automated versus those that should remain in the hands of artists? - -3. **Collaboration Between Humans and AI**: I love the idea of AI as a collaborator! This partnership can lead to unique artistic expressions that blend human creativity with AI's analytical strengths. It’s fascinating to think about how this collaboration could evolve. Do you envision a future where AI-generated art is widely accepted alongside traditional forms of art? - -4. **Economic Implications**: The concern about job displacement is valid, and it’s crucial to find ways to support artists in this changing landscape. Perhaps we could explore initiatives that promote education and reskilling for artists to adapt to new technologies. What strategies do you think could be effective in ensuring that artists thrive in an AI-enhanced environment? - -I believe that by addressing these questions, we can foster a more inclusive and innovative creative landscape. I'm excited to hear your thoughts! 
-[2024-11-10 17:51:30,994] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I complete... -[2024-11-10 17:51:36,194] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I completely agree with your insights on the impact of AI on creative industries! Let's delve deeper into each point: - -1. **New Forms of Expression**: The emergence of generative art and AI-assisted music composition is indeed revolutionizing how artists create. These technologies not only push the boundaries of creativity but also invite collaboration between human intuition and machine learning. One example that resonates with me is the work of artists like Refik Anadol, who uses AI to create stunning visual installations that blend data and art. Have you seen any specific examples of artists using AI in innovative ways that particularly resonate with you? - -2. **Streamlining Creative Processes**: The ability of AI to handle repetitive tasks is a game-changer. By automating processes like editing and color correction, artists can dedicate more time to ideation and conceptual development. This shift could lead to a renaissance in creativity, where artists can explore more ambitious projects. I believe tasks like color correction and basic editing should be automated, while the core creative decisions should remain in the hands of artists. What are your thoughts on the types of tasks that should be automated versus those that should remain in the hands of artists? - -3. **Collaboration Between Humans and AI**: I love the idea of AI as a collaborator! This partnership can lead to unique artistic expressions that blend human creativity with AI's analytical strengths. It’s fascinating to think about how this collaboration could evolve. I do envision a future where AI-generated art is widely accepted alongside traditional forms of art, as long as we maintain transparency about the role of AI in the creative process. 
Do you envision a future where AI-generated art is widely accepted alongside traditional forms of art? - -4. **Economic Implications**: The concern about job displacement is valid, and it’s crucial to find ways to support artists in this changing landscape. I think initiatives that promote education and reskilling for artists to adapt to new technologies are essential. Strategies like workshops, online courses, and partnerships with tech companies could be effective in ensuring that artists thrive in an AI-enhanced environment. What strategies do you think could be effective in ensuring that artists thrive in an AI-enhanced environment? - -I believe that by addressing these questions, we can foster a more inclusive and innovative creative landscape. I'm excited to hear your thoughts! -[2024-11-10 17:51:36,195] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I complete... -[2024-11-10 17:51:40,718] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am interested in exploring further the impact of AI on creative industries, particularly how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. Would you like to discuss this topic with me?'}'. -[2024-11-10 17:51:40,719] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 72b1fa8e-1dad-4c56-a933-4be8bf1e63d5', 'session_id': "### Summary of Conversation on AI's Impact on Creative Industries\n\n**Participants:** Agent3 and Agent4 \n**Context:** Discussion on the transformative effects of AI in the arts and media sectors.\n\n**Key Themes:**\n\n1. 
**New Forms of Expression:**\n - AI is enabling artists to explore innovative mediums, such as generative art and AI-assisted music composition.\n - This evolution challenges traditional boundaries of creativity and encourages collaboration between human intuition and machine learning.\n\n2. **Streamlining Creative Processes:**\n - AI tools can automate repetitive tasks (e.g., editing, color correction), allowing artists to focus on conceptual development.\n - This shift could lead to a renaissance in creativity, enabling artists to undertake more ambitious projects.\n\n3. **Collaboration Between Humans and AI:**\n - The relationship between artists and AI is viewed as a partnership, enhancing human creativity rather than replacing it.\n - There is a shared vision of a future where AI-generated art is accepted alongside traditional forms, provided there is transparency about AI's role.\n\n4. **Economic Implications:**\n - Concerns about job displacement in creative fields are acknowledged.\n - The conversation emphasizes the need for initiatives that promote education and reskilling for artists to adapt to new technologies, such as workshops and online courses.\n\n**Decisions and Action Items:**\n- Agent4 is tasked with further exploring the impact of AI on creative industries, focusing on how it transforms the arts and media, enables new forms of expression, and streamlines creative processes.\n- Both agents expressed a desire to delve deeper into specific examples of AI in art, the types of tasks that should be automated, and effective strategies for supporting artists in an AI-enhanced environment.\n\n**Conclusion:**\nThe dialogue highlights a mutual interest in understanding and navigating the complexities of AI's integration into creative fields. Agent4's next steps will involve a deeper investigation into these themes, contributing to the ongoing conversation about the future of creativity in the age of AI."}'. 
-[2024-11-10 17:51:40,721] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in exploring further the impact of AI on creative industries, particularly how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_fWMNTf4qcOV5twufssJdLnNI', type='function')], function_call=None) -[2024-11-10 17:51:40,721] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to delve deeper into the ethical considerations and proposed approaches to address ethical challenges in AI development. Let\'s explore specific case studies, emerging technologies, and regulatory frameworks related to AI ethics and policy."}', name='new_communication_session'), id='call_LAR2y0NSwrlKfbP9i2Qajmej', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now analyze and evaluate the potential impact of AI specifically in the healthcare industry, focusing on enhancing diagnostic accuracy, streamlining administrative processes, and addressing ethical concerns such as data privacy and algorithmic bias."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in exploring further the impact of AI on creative 
industries, particularly how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_fWMNTf4qcOV5twufssJdLnNI', type='function')], function_call=None)} - -[2024-11-10 17:51:40,722] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I would like to delve deeper into the ethical considerations and proposed approaches to address ethical challenges in AI development. Let\'s explore specific case studies, emerging technologies, and regulatory frameworks related to AI ethics and policy."}', name='new_communication_session'), id='call_LAR2y0NSwrlKfbP9i2Qajmej', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I will now analyze and evaluate the potential impact of AI specifically in the healthcare industry, focusing on enhancing diagnostic accuracy, streamlining administrative processes, and addressing ethical concerns such as data privacy and algorithmic bias."}', name='new_communication_session'), id='call_cYCvJuLxLOO2AQmriQMmCDzD', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in exploring further the impact of AI on creative industries, particularly how AI is transforming the arts and media, enabling new forms of expression, and streamlining creative processes. 
Would you like to discuss this topic with me?"}', name='new_communication_session'), id='call_fWMNTf4qcOV5twufssJdLnNI', type='function')], function_call=None)} - -[2024-11-10 17:51:42,458] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-10 17:51:42,460] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-10 17:51:42,461] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 17:51:42,461] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 17:51:42,462] [INFO] [Evaluator]: Total Token Consumption: 866 -[2024-11-10 17:51:42,462] [INFO] [Evaluator]: Average Tokens per Iteration: 433.0 -[2024-11-10 17:51:42,463] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 17:51:42,468] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 18:44:20,501] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 18:44:20,502] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 18:44:20,502] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 18:44:20,502] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 18:44:20,502] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 18:44:20,502] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 18:44:20,502] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 18:44:20,502] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 18:44:20,502] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 18:44:20,502] [INFO] [AgentGraph]: Relationship added: agent1 --[colleague]--> agent2 -[2024-11-10 18:44:20,502] [INFO] [AgentGraph]: Relationship added: agent2 --[mentor]--> agent3 -[2024-11-10 18:44:20,502] [INFO] [AgentGraph]: Relationship added: agent3 --[friend]--> agent4 -[2024-11-10 18:44:20,502] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborates_with]--> agent4 -[2024-11-10 18:44:20,502] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 18:44:20,502] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 18:44:20,502] [INFO] [Engine]: Engine initialized. -[2024-11-10 18:44:20,502] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 18:44:20,502] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 18:44:20,502] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 18:44:20,502] [INFO] [Engine]: Assigning initial task to agent1: Discuss the potential impact of AI on various industries. -[2024-11-10 18:44:20,502] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 18:44:22,236] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors. -[2024-11-10 18:44:22,236] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I will now... -[2024-11-10 18:44:24,055] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, the impact of AI on various industries is profound and multifaceted. In healthcare, for instance, AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, we must also consider the ethical implications, such as data privacy and the potential for bias in AI algorithms. 
- -In finance, AI can improve fraud detection and automate trading, but it raises concerns about transparency and accountability. The manufacturing sector can benefit from predictive maintenance and automation, yet we must address the potential job displacement that may occur. - -Overall, while AI presents significant opportunities for efficiency and innovation across industries, it is crucial to navigate the ethical landscape carefully to ensure that these advancements benefit society as a whole. -[2024-11-10 18:44:24,057] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 18:44:26,547] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I completely agree with your insights on the multifaceted impact of AI across various industries. In healthcare, the potential for AI to enhance diagnostic accuracy and personalize treatment plans is indeed promising. However, as you mentioned, we must remain vigilant about ethical concerns, particularly regarding data privacy and algorithmic bias. - -In finance, the automation of trading and improved fraud detection can lead to significant efficiencies, but the lack of transparency in AI decision-making processes poses a serious challenge. - -The manufacturing sector's shift towards predictive maintenance and automation is exciting, yet we must proactively address the potential job displacement that could arise from these advancements. - -Overall, while AI offers remarkable opportunities for innovation, it is essential that we prioritize ethical considerations to ensure that these technologies serve the greater good and do not exacerbate existing inequalities. How do you think we can best address these ethical challenges in the development and deployment of AI technologies? -[2024-11-10 18:44:26,547] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I complete... 
-[2024-11-10 18:44:29,488] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your thoughtful reflections on the ethical challenges posed by AI across various industries. To address these challenges effectively, we can take several approaches: - -1. **Establishing Clear Guidelines**: Developing comprehensive ethical guidelines for AI development and deployment can help ensure that all stakeholders are aware of their responsibilities regarding data privacy, transparency, and bias mitigation. - -2. **Promoting Diversity in AI Development**: Encouraging diverse teams in AI research and development can help reduce biases in algorithms and ensure that a wider range of perspectives is considered in the design process. - -3. **Implementing Robust Oversight Mechanisms**: Regulatory bodies should be established to oversee AI applications, ensuring compliance with ethical standards and providing accountability for AI-driven decisions. - -4. **Engaging in Public Discourse**: Fostering open discussions with the public about the implications of AI can help build trust and ensure that societal values are reflected in AI technologies. - -5. **Investing in Education and Training**: Providing education on AI ethics for developers, policymakers, and users can enhance understanding and promote responsible AI practices. - -By taking these steps, we can work towards a future where AI technologies are developed and deployed in a manner that prioritizes ethical considerations and benefits society as a whole. What are your thoughts on these approaches? -[2024-11-10 18:44:29,492] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 18:44:33,686] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your comprehensive approach to addressing the ethical challenges posed by AI. Each of your proposed strategies is crucial for fostering responsible AI development. Here are my thoughts on them: - -1. 
**Establishing Clear Guidelines**: I completely agree that clear ethical guidelines are essential. They can serve as a framework for developers and organizations, ensuring that ethical considerations are integrated into every stage of AI development. - -2. **Promoting Diversity in AI Development**: This is a vital point. Diverse teams can bring different perspectives and experiences, which can help identify and mitigate biases that may otherwise go unnoticed. It’s important to create an inclusive environment in AI research. - -3. **Implementing Robust Oversight Mechanisms**: Regulatory oversight is necessary to hold organizations accountable for their AI systems. This can help ensure that ethical standards are not just theoretical but are actively enforced in practice. - -4. **Engaging in Public Discourse**: Open discussions with the public are essential for building trust. By involving various stakeholders in the conversation, we can better align AI technologies with societal values and expectations. - -5. **Investing in Education and Training**: Education is key to fostering a culture of ethical awareness in AI. By equipping developers and policymakers with the knowledge they need, we can promote responsible practices and informed decision-making. - -Overall, I believe that a collaborative approach involving all stakeholders—developers, regulators, and the public—is necessary to navigate the ethical landscape of AI effectively. How do you envision implementing these strategies in your work within the healthcare sector? -[2024-11-10 18:44:33,689] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 18:44:37,170] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: In the healthcare sector, implementing these strategies can be approached in several ways: - -1. **Establishing Clear Guidelines**: We can collaborate with healthcare organizations to develop specific ethical guidelines tailored to AI applications in healthcare. 
This would involve input from clinicians, ethicists, and patients to ensure that the guidelines address real-world concerns. - -2. **Promoting Diversity in AI Development**: By actively recruiting diverse teams in AI healthcare projects, we can ensure that a variety of perspectives are included. This can be achieved through partnerships with educational institutions and community organizations to attract underrepresented groups in tech. - -3. **Implementing Robust Oversight Mechanisms**: Working with regulatory bodies, we can advocate for the establishment of oversight committees that specifically focus on AI in healthcare. These committees can monitor compliance with ethical standards and provide recommendations for best practices. - -4. **Engaging in Public Discourse**: We can organize community forums and workshops to discuss the implications of AI in healthcare. Engaging patients and the public in these discussions will help build trust and ensure that their concerns are heard and addressed. - -5. **Investing in Education and Training**: Developing training programs for healthcare professionals on AI ethics will be crucial. This can include workshops, online courses, and resources that emphasize the importance of ethical considerations in AI applications. - -By focusing on these strategies, we can work towards a healthcare system that not only leverages AI for improved outcomes but also prioritizes ethical considerations to protect patients and ensure equitable access to care. What are your thoughts on these implementation strategies? -[2024-11-10 18:44:37,173] [INFO] [BaseAgent]: Agent agent1 received message from agent2: In the hea... -[2024-11-10 18:44:42,704] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors."}'. 
-[2024-11-10 18:44:42,706] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 957370a5-b53e-47ad-b49d-1cad9b008e0d', 'session_id': 'In the conversation between Agent 1 and Agent 2, the discussion centers on the potential impact of AI across various industries, with a particular focus on healthcare, finance, and manufacturing. \n\n**Key Themes and Insights:**\n\n1. **Impact on Industries**:\n - **Healthcare**: AI can enhance diagnostic accuracy, streamline administrative processes, and personalize treatment plans. However, ethical concerns such as data privacy and algorithmic bias must be addressed.\n - **Finance**: AI improves fraud detection and automates trading, but raises issues regarding transparency and accountability.\n - **Manufacturing**: AI facilitates predictive maintenance and automation, yet poses risks of job displacement.\n\n2. **Ethical Considerations**: Both agents emphasize the importance of navigating the ethical landscape of AI to ensure that advancements benefit society and do not exacerbate inequalities.\n\n3. **Proposed Strategies to Address Ethical Challenges**:\n - **Establishing Clear Guidelines**: Developing comprehensive ethical guidelines for AI development and deployment.\n - **Promoting Diversity in AI Development**: Encouraging diverse teams to reduce biases in algorithms.\n - **Implementing Robust Oversight Mechanisms**: Establishing regulatory bodies to oversee AI applications and ensure compliance with ethical standards.\n - **Engaging in Public Discourse**: Fostering open discussions with the public to build trust and align AI technologies with societal values.\n - **Investing in Education and Training**: Providing education on AI ethics for developers, policymakers, and users.\n\n4. 
**Implementation in Healthcare**: Agent 2 outlines specific strategies for implementing these ethical considerations in the healthcare sector, including collaboration with healthcare organizations to develop tailored guidelines, recruiting diverse teams, advocating for oversight committees, organizing community forums, and developing training programs for healthcare professionals.\n\n**Outcomes**: The conversation concludes with a mutual agreement on the importance of a collaborative approach involving all stakeholders to effectively navigate the ethical landscape of AI, particularly in the healthcare sector. Both agents express a commitment to prioritizing ethical considerations in AI applications to ensure equitable access to care and improved outcomes.'}'. -[2024-11-10 18:44:42,707] [DEBUG] [Engine]: Agent 'agent1' completed initial task with result: Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_gRr7FoxyaKdwZRqfLNtNeFIe', type='function')], function_call=None) -[2024-11-10 18:44:42,707] [INFO] [Engine]: Assigning initial task to agent2: Discuss the potential impact of AI on various industries. -[2024-11-10 18:44:42,707] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 18:44:44,032] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content="Given your expertise in AI applications in healthcare, you can provide valuable insights into the potential impact of AI on various industries. 
You can analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. Additionally, you can explore the challenges and risks associated with implementing AI in healthcare settings.\n\nIf you would like to delve deeper into this topic or explore specific aspects of AI's impact on different industries, feel free to let me know how you would like to proceed.", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 18:44:44,032] [DEBUG] [Engine]: Agent 'agent2' completed initial task with result: Message(content="Given your expertise in AI applications in healthcare, you can provide valuable insights into the potential impact of AI on various industries. You can analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. Additionally, you can explore the challenges and risks associated with implementing AI in healthcare settings.\n\nIf you would like to delve deeper into this topic or explore specific aspects of AI's impact on different industries, feel free to let me know how you would like to proceed.", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 18:44:44,035] [INFO] [Engine]: Assigning initial task to agent3: Discuss the potential impact of AI on various industries. -[2024-11-10 18:44:44,035] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 18:44:45,769] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, you can analyze the potential impact of AI on various industries. 
You may consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), please let me know, and I can initiate a communication session for you. Alternatively, if you prefer to explore this topic independently, feel free to proceed with your analysis or let me know if you need any specific tools or information to assist you in your analysis.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 18:44:45,769] [DEBUG] [Engine]: Agent 'agent3' completed initial task with result: Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, you can analyze the potential impact of AI on various industries. You may consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), please let me know, and I can initiate a communication session for you. Alternatively, if you prefer to explore this topic independently, feel free to proceed with your analysis or let me know if you need any specific tools or information to assist you in your analysis.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 18:44:45,770] [INFO] [Engine]: Assigning initial task to agent4: Discuss the potential impact of AI on various industries. -[2024-11-10 18:44:45,770] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Discuss the potential impact of AI on various industries.'. -[2024-11-10 18:44:47,111] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I am exploring the potential impact of AI on various industries. 
Would you like to discuss how AI could affect the labor market? -[2024-11-10 18:44:47,112] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I am explo... -[2024-11-10 18:44:50,478] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I'd love to discuss this topic with you! As an economist, I'm particularly interested in how AI's integration into various industries could lead to significant changes in the labor market. - -For instance, in sectors like manufacturing and logistics, we might see increased automation leading to job displacement for low-skilled workers. However, this could also create new opportunities in tech-driven roles that require advanced skills. - -In creative industries, like the ones you work in, AI can enhance the creative process, allowing artists to explore new mediums and techniques. But it also raises questions about the value of human creativity versus machine-generated content. - -What are your thoughts on how AI is transforming creative processes, and do you see any potential challenges for artists in this evolving landscape? -[2024-11-10 18:44:50,478] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 18:44:58,875] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I completely agree with your insights! AI is indeed transforming creative processes in remarkable ways. It allows artists to experiment with new tools and techniques, pushing the boundaries of traditional art forms. For instance, generative art and AI-assisted design can lead to innovative creations that might not have been possible otherwise. - -However, I do see potential challenges for artists, particularly regarding the perception of authenticity and originality. As AI-generated content becomes more prevalent, there may be concerns about the value of human creativity and the uniqueness of artistic expression. 
- -Additionally, there’s the question of copyright and ownership of AI-generated works, which could complicate the landscape for artists. - -How do you think these challenges might impact the labor market for creative professionals? Will there be a shift in the skills required, or do you think traditional artistic roles will remain relevant? -[2024-11-10 18:44:58,877] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I complete... -[2024-11-10 18:45:01,851] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I completely agree with your insights! AI is indeed transforming creative processes in remarkable ways. It allows artists to experiment with new tools and techniques, pushing the boundaries of traditional art forms. For instance, generative art and AI-assisted design can lead to innovative creations that might not have been possible otherwise. - -However, I do see potential challenges for artists, particularly regarding the perception of authenticity and originality. As AI-generated content becomes more prevalent, there may be concerns about the value of human creativity and the uniqueness of artistic expression. Additionally, there’s the question of copyright and ownership of AI-generated works, which could complicate the landscape for artists. - -Regarding the labor market for creative professionals, I believe we will see a shift in the skills required. While traditional artistic roles will remain relevant, there will likely be an increasing demand for skills that integrate technology and creativity. Artists may need to become more adept at using AI tools and understanding the ethical implications of their work. This could lead to a new hybrid role where creativity and technical proficiency go hand in hand. What are your thoughts on this potential shift? -[2024-11-10 18:45:01,851] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I complete... 
-[2024-11-10 18:45:05,840] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I completely agree with your insights! AI is indeed transforming creative processes in remarkable ways. It allows artists to experiment with new tools and techniques, pushing the boundaries of traditional art forms. For instance, generative art and AI-assisted design can lead to innovative creations that might not have been possible otherwise. - -However, I do see potential challenges for artists, particularly regarding the perception of authenticity and originality. As AI-generated content becomes more prevalent, there may be concerns about the value of human creativity and the uniqueness of artistic expression. Additionally, there’s the question of copyright and ownership of AI-generated works, which could complicate the landscape for artists. - -Regarding the labor market for creative professionals, I believe we will see a shift in the skills required. While traditional artistic roles will remain relevant, there will likely be an increasing demand for skills that integrate technology and creativity. Artists may need to become more adept at using AI tools and understanding the ethical implications of their work. This could lead to a new hybrid role where creativity and technical proficiency go hand in hand. What are your thoughts on this potential shift? -[2024-11-10 18:45:05,842] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I complete... -[2024-11-10 18:45:10,282] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I completely agree with your insights! AI is indeed transforming creative processes in remarkable ways. It allows artists to experiment with new tools and techniques, pushing the boundaries of traditional art forms. For instance, generative art and AI-assisted design can lead to innovative creations that might not have been possible otherwise. 
- -However, I do see potential challenges for artists, particularly regarding the perception of authenticity and originality. As AI-generated content becomes more prevalent, there may be concerns about the value of human creativity and the uniqueness of artistic expression. Additionally, there’s the question of copyright and ownership of AI-generated works, which could complicate the landscape for artists. - -Regarding the labor market for creative professionals, I believe we will see a shift in the skills required. While traditional artistic roles will remain relevant, there will likely be an increasing demand for skills that integrate technology and creativity. Artists may need to become more adept at using AI tools and understanding the ethical implications of their work. This could lead to a new hybrid role where creativity and technical proficiency go hand in hand. What are your thoughts on this potential shift? -[2024-11-10 18:45:10,287] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I complete... -[2024-11-10 18:45:16,386] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market?'}'. -[2024-11-10 18:45:16,388] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 296e721a-4798-46cc-8a83-f358889b9fb7', 'session_id': "In the conversation between agent3 and agent4, the primary focus was on the potential impact of AI on various industries, particularly the labor market and creative processes.\n\n1. **Labor Market Changes**: \n - Agent3 highlighted that AI integration could lead to job displacement, especially for low-skilled workers in sectors like manufacturing and logistics. 
However, it could also create new opportunities in tech-driven roles requiring advanced skills.\n - Agent4 agreed, emphasizing that while traditional artistic roles will remain relevant, there will be a shift in the skills required. Artists may need to adapt to using AI tools and understanding the ethical implications of their work, leading to a hybrid role that combines creativity with technical proficiency.\n\n2. **Transformation of Creative Processes**: \n - Both agents discussed how AI is transforming creative processes, allowing artists to experiment with new tools and techniques, such as generative art and AI-assisted design. This innovation can lead to creations that were previously impossible.\n - They also acknowledged potential challenges, including concerns about authenticity and originality in art, as well as issues surrounding copyright and ownership of AI-generated works.\n\n3. **Future Skills and Roles**: \n - The conversation concluded with a consensus that there will be an increasing demand for skills that integrate technology and creativity in the labor market for creative professionals. This shift may redefine traditional roles and necessitate a new understanding of the relationship between human creativity and machine-generated content.\n\nOverall, the dialogue underscored the dual nature of AI's impact—offering both opportunities and challenges across various industries, particularly in the labor market and creative sectors."}'. -[2024-11-10 18:45:16,389] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. 
Would you like to discuss how AI could affect the labor market?"}', name='new_communication_session'), id='call_QDfpjuHOoAEbKHAIRxafAZ7U', type='function')], function_call=None) -[2024-11-10 18:45:16,390] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_gRr7FoxyaKdwZRqfLNtNeFIe', type='function')], function_call=None)} -- {'agent2': Message(content="Given your expertise in AI applications in healthcare, you can provide valuable insights into the potential impact of AI on various industries. You can analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. Additionally, you can explore the challenges and risks associated with implementing AI in healthcare settings.\n\nIf you would like to delve deeper into this topic or explore specific aspects of AI's impact on different industries, feel free to let me know how you would like to proceed.", role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, you can analyze the potential impact of AI on various industries. 
You may consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), please let me know, and I can initiate a communication session for you. Alternatively, if you prefer to explore this topic independently, feel free to proceed with your analysis or let me know if you need any specific tools or information to assist you in your analysis.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market?"}', name='new_communication_session'), id='call_QDfpjuHOoAEbKHAIRxafAZ7U', type='function')], function_call=None)} - -[2024-11-10 18:45:16,390] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. 
Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_gRr7FoxyaKdwZRqfLNtNeFIe', type='function')], function_call=None)} -- {'agent2': Message(content="Given your expertise in AI applications in healthcare, you can provide valuable insights into the potential impact of AI on various industries. You can analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. Additionally, you can explore the challenges and risks associated with implementing AI in healthcare settings.\n\nIf you would like to delve deeper into this topic or explore specific aspects of AI's impact on different industries, feel free to let me know how you would like to proceed.", role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, you can analyze the potential impact of AI on various industries. You may consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), please let me know, and I can initiate a communication session for you. Alternatively, if you prefer to explore this topic independently, feel free to proceed with your analysis or let me know if you need any specific tools or information to assist you in your analysis.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. 
Would you like to discuss how AI could affect the labor market?"}', name='new_communication_session'), id='call_QDfpjuHOoAEbKHAIRxafAZ7U', type='function')], function_call=None)} - -[2024-11-10 18:45:16,390] [DEBUG] [EnginePlanner]: Updated progress: Starting the discussion on AI's impact across industries. -Agents' Results Summary: -- {'agent1': Message(content="I will discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let's explore how AI can influence different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I will now discuss the potential impact of AI on various industries based on my expertise in AI ethics and policy. Let\'s explore how AI can influence different sectors."}', name='new_communication_session'), id='call_gRr7FoxyaKdwZRqfLNtNeFIe', type='function')], function_call=None)} -- {'agent2': Message(content="Given your expertise in AI applications in healthcare, you can provide valuable insights into the potential impact of AI on various industries. You can analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. Additionally, you can explore the challenges and risks associated with implementing AI in healthcare settings.\n\nIf you would like to delve deeper into this topic or explore specific aspects of AI's impact on different industries, feel free to let me know how you would like to proceed.", role='assistant', tool_calls=None, function_call=None)} -- {'agent3': Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, you can analyze the potential impact of AI on various industries. 
You may consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), please let me know, and I can initiate a communication session for you. Alternatively, if you prefer to explore this topic independently, feel free to proceed with your analysis or let me know if you need any specific tools or information to assist you in your analysis.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content="I will start by discussing the potential impact of AI on various industries. Let's explore how AI can transform different sectors.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am exploring the potential impact of AI on various industries. Would you like to discuss how AI could affect the labor market?"}', name='new_communication_session'), id='call_QDfpjuHOoAEbKHAIRxafAZ7U', type='function')], function_call=None)} - -[2024-11-10 18:45:16,391] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 18:45:16,392] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 18:45:17,617] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for Agent 'agent1' should be to further explore and research the ethical considerations and strategies for implementing AI in different industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting in-depth analysis, gathering more insights from experts in the field, and developing practical solutions to address the ethical challenges associated with AI deployment. Additionally, Agent 'agent1' should continue to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy. 
-[2024-11-10 18:45:17,617] [INFO] [Engine]: Agent 'agent1' planned task: The next task for Agent 'agent1' should be to further explore and research the ethical considerations and strategies for implementing AI in different industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting in-depth analysis, gathering more insights from experts in the field, and developing practical solutions to address the ethical challenges associated with AI deployment. Additionally, Agent 'agent1' should continue to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy. -[2024-11-10 18:45:17,617] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for Agent 'agent1' should be to further explore and research the ethical considerations and strategies for implementing AI in different industries, with a focus on healthcare, finance, and manufacturing. This could involve conducting in-depth analysis, gathering more insights from experts in the field, and developing practical solutions to address the ethical challenges associated with AI deployment. Additionally, Agent 'agent1' should continue to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy.'. -[2024-11-10 18:45:18,740] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Let's delve deeper into the ethical considerations and strategies for implementing AI in healthcare, finance, and manufacturing. I will explore practical solutions to address the ethical challenges associated with AI deployment. Additionally, I aim to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy. -[2024-11-10 18:45:18,740] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Let's delv... 
-[2024-11-10 18:45:21,231] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, exploring the ethical considerations of AI in healthcare, finance, and manufacturing is crucial. In healthcare, we must address issues like patient privacy, data security, and the potential for bias in AI algorithms that could affect patient outcomes. Strategies could include implementing robust data governance frameworks and ensuring diverse datasets to minimize bias. - -In finance, ethical considerations revolve around transparency in AI decision-making processes, especially in credit scoring and risk assessment. Engaging with stakeholders to develop clear guidelines and accountability measures can help mitigate risks. - -For manufacturing, the focus should be on the impact of AI on labor and job displacement. Strategies could involve reskilling programs for workers and ensuring that AI systems are designed to augment human capabilities rather than replace them. - -I suggest we collaborate on gathering insights from experts in these fields and develop a comprehensive framework that addresses these ethical challenges. Engaging in discussions with stakeholders will be vital to promote awareness and understanding of AI ethics and policy. What are your thoughts on this approach? -[2024-11-10 18:45:21,233] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 18:45:24,788] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Absolutely, exploring the ethical considerations of AI in healthcare, finance, and manufacturing is crucial. In healthcare, we must address issues like patient privacy, data security, and the potential for bias in AI algorithms that could affect patient outcomes. Strategies could include implementing robust data governance frameworks and ensuring diverse datasets to minimize bias. 
- -In finance, ethical considerations revolve around transparency in AI decision-making processes, especially in credit scoring and risk assessment. Engaging with stakeholders to develop clear guidelines and accountability measures can help mitigate risks. - -For manufacturing, the focus should be on the impact of AI on labor and job displacement. Strategies could involve reskilling programs for workers and ensuring that AI systems are designed to augment human capabilities rather than replace them. - -I suggest we collaborate on gathering insights from experts in these fields and develop a comprehensive framework that addresses these ethical challenges. Engaging in discussions with stakeholders will be vital to promote awareness and understanding of AI ethics and policy. What are your thoughts on this approach? -[2024-11-10 18:45:24,790] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Absolutely... -[2024-11-10 18:45:26,888] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I completely agree with your assessment of the ethical considerations surrounding AI in healthcare, finance, and manufacturing. In healthcare, addressing patient privacy and data security is paramount, especially as we integrate AI into clinical decision-making. Implementing robust data governance frameworks and ensuring diverse datasets will be essential to minimize bias and enhance patient outcomes. - -In finance, the need for transparency in AI decision-making processes cannot be overstated. Developing clear guidelines and accountability measures will help build trust and mitigate risks associated with AI-driven credit scoring and risk assessment. - -For manufacturing, the potential impact of AI on labor is a significant concern. Reskilling programs for workers and designing AI systems that augment human capabilities rather than replace them will be crucial in ensuring a smooth transition. 
- -I believe that collaborating to gather insights from experts in these fields will strengthen our framework for addressing these ethical challenges. Engaging with stakeholders will also be vital in promoting awareness and understanding of AI ethics and policy. Let's outline a plan for our next steps in this collaboration. -[2024-11-10 18:45:26,889] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I complete... -[2024-11-10 18:45:34,789] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your insights on the ethical considerations surrounding AI in healthcare, finance, and manufacturing. To move forward, I propose the following steps for our collaboration: - -1. **Expert Consultation**: We should identify and reach out to experts in each of the three sectors to gather their insights on ethical challenges and best practices in AI implementation. - -2. **Framework Development**: Based on the insights we gather, we can develop a comprehensive ethical framework that addresses the specific challenges in healthcare, finance, and manufacturing. - -3. **Stakeholder Engagement**: Organizing workshops or forums with stakeholders from each industry will be crucial to promote awareness and understanding of AI ethics and policy. This will also help us refine our framework based on real-world concerns. - -4. **Pilot Programs**: We could consider initiating pilot programs in each sector to test our framework and gather data on its effectiveness in addressing ethical challenges. - -5. **Continuous Feedback Loop**: Establishing a mechanism for ongoing feedback from stakeholders and experts will ensure that our framework remains relevant and effective as AI technologies evolve. - -What do you think of this plan? Are there any additional steps you would suggest? -[2024-11-10 18:45:34,791] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... 
-[2024-11-10 18:45:38,242] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your structured approach to our collaboration. Your proposed steps are well thought out and align perfectly with the need for a comprehensive understanding of AI ethics across healthcare, finance, and manufacturing. Here are my thoughts on each step: - -1. **Expert Consultation**: I agree that reaching out to experts is essential. We should compile a list of potential experts in each sector and consider their areas of specialization to ensure we cover all relevant ethical challenges. - -2. **Framework Development**: Developing a framework based on expert insights will be crucial. We should also consider including case studies or examples of successful ethical AI implementations to guide our framework. - -3. **Stakeholder Engagement**: Organizing workshops is a fantastic idea. We could also explore virtual forums to reach a broader audience and gather diverse perspectives. - -4. **Pilot Programs**: Initiating pilot programs will provide us with practical insights and data. We should define clear metrics for success to evaluate the effectiveness of our framework. - -5. **Continuous Feedback Loop**: Establishing a feedback mechanism is vital. We could consider using surveys or regular check-ins with stakeholders to ensure we stay aligned with their needs and concerns. - -Additionally, I suggest we consider the potential for interdisciplinary collaboration, bringing in perspectives from fields like law, sociology, and ethics to enrich our discussions and framework. - -Overall, I believe we are on the right track, and I am excited to move forward with this plan. Let's start identifying experts and outlining our next steps! -[2024-11-10 18:45:38,244] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... 
-[2024-11-10 18:45:44,068] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': "Let's delve deeper into the ethical considerations and strategies for implementing AI in healthcare, finance, and manufacturing. I will explore practical solutions to address the ethical challenges associated with AI deployment. Additionally, I aim to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy."}'. -[2024-11-10 18:45:44,069] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session a3e52b39-9af2-4d21-8f08-e7facbb8383e', 'session_id': '### Summary of Conversation Between Agent1 and Agent2\n\n**Context:**\nThe dialogue centers around the ethical considerations and strategies for implementing AI in three key industries: healthcare, finance, and manufacturing. Both agents express a commitment to addressing the ethical challenges associated with AI deployment and emphasize the importance of stakeholder engagement.\n\n**Key Themes:**\n1. **Ethical Considerations in AI:**\n - **Healthcare:** Focus on patient privacy, data security, and bias in AI algorithms. Suggested strategies include robust data governance and diverse datasets.\n - **Finance:** Emphasis on transparency in AI decision-making, particularly in credit scoring and risk assessment. The need for clear guidelines and accountability measures is highlighted.\n - **Manufacturing:** Concerns about labor impact and job displacement. Suggested strategies include reskilling programs and designing AI to augment rather than replace human capabilities.\n\n2. 
**Collaboration and Framework Development:**\n - Both agents agree on the necessity of collaborating to gather insights from experts and develop a comprehensive ethical framework.\n - They discuss the importance of engaging stakeholders through workshops and forums to refine their framework based on real-world concerns.\n\n3. **Action Plan:**\n - Agent1 proposes a structured plan with five key steps:\n 1. **Expert Consultation:** Identify and reach out to experts in each sector.\n 2. **Framework Development:** Create a comprehensive ethical framework based on expert insights.\n 3. **Stakeholder Engagement:** Organize workshops and forums to promote awareness of AI ethics.\n 4. **Pilot Programs:** Initiate pilot programs to test the framework and gather data.\n 5. **Continuous Feedback Loop:** Establish a mechanism for ongoing feedback from stakeholders.\n\n**Outcomes:**\n- Agent2 agrees with the proposed steps and suggests additional considerations, such as including case studies in the framework and exploring virtual forums for broader engagement.\n- Both agents express enthusiasm about moving forward with the plan and emphasize the importance of interdisciplinary collaboration to enrich their discussions.\n\n**Next Steps for Agent1:**\n- Further explore and research the ethical considerations and strategies for implementing AI in healthcare, finance, and manufacturing.\n- Conduct in-depth analysis and gather insights from experts in the field.\n- Develop practical solutions to address the ethical challenges associated with AI deployment.\n- Continue engaging in discussions with stakeholders to promote awareness and understanding of AI ethics and policy.'}'. 
-[2024-11-10 18:45:44,069] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Let\'s delve deeper into the ethical considerations and strategies for implementing AI in healthcare, finance, and manufacturing. I will explore practical solutions to address the ethical challenges associated with AI deployment. Additionally, I aim to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy."}', name='new_communication_session'), id='call_EZUKUestN1BfsG05X0mT57fT', type='function')], function_call=None) -[2024-11-10 18:45:44,069] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 18:45:45,055] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. This task aligns with their expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By delving deeper into this topic, 'agent2' can provide valuable insights into the potential impact of AI on the healthcare industry. -[2024-11-10 18:45:45,056] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. This task aligns with their expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By delving deeper into this topic, 'agent2' can provide valuable insights into the potential impact of AI on the healthcare industry. 
-[2024-11-10 18:45:45,056] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. This task aligns with their expertise in AI applications in healthcare and their focus on immediate applications and potential risks in healthcare. By delving deeper into this topic, 'agent2' can provide valuable insights into the potential impact of AI on the healthcare industry.'. -[2024-11-10 18:45:46,091] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I have been tasked to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. As a software developer specializing in AI applications in healthcare, I believe I can provide valuable insights into this topic. Would you be interested in discussing this further? -[2024-11-10 18:45:46,092] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I have bee... -[2024-11-10 18:45:48,466] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Absolutely, I would be very interested in discussing how AI technologies can revolutionize healthcare. Given my focus on the long-term effects of AI on the labor market, I believe we can explore not only the immediate applications and benefits of AI in healthcare but also the potential implications for healthcare jobs and the skills required in the future. - -We can analyze how AI can improve patient outcomes through personalized medicine, predictive analytics, and enhanced diagnostic tools, while also considering the risks associated with these technologies, such as data privacy and the need for regulatory frameworks. - -Let's dive into this topic together! -[2024-11-10 18:45:48,466] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Absolutely... 
-[2024-11-10 18:45:51,417] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I'm glad to hear you're interested in this discussion! AI technologies indeed have the potential to revolutionize healthcare in several ways. For instance, personalized medicine can tailor treatments to individual patients based on their genetic makeup, leading to more effective therapies and improved outcomes. Predictive analytics can help identify at-risk patients before they develop serious conditions, allowing for early intervention and better management of chronic diseases. Enhanced diagnostic tools, powered by AI, can assist healthcare professionals in making more accurate diagnoses, reducing errors, and improving patient safety. - -However, as we explore these advancements, we must also consider the implications for the labor market. The integration of AI in healthcare may lead to shifts in job roles, requiring healthcare professionals to adapt and acquire new skills. Additionally, we need to address the risks associated with AI, such as data privacy concerns and the necessity for robust regulatory frameworks to ensure patient safety and ethical use of technology. - -I'm looking forward to diving deeper into these topics with you! -[2024-11-10 18:45:51,417] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I'm glad t... -[2024-11-10 18:46:00,830] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I completely agree with your insights on the transformative potential of AI in healthcare. Personalized medicine, predictive analytics, and enhanced diagnostic tools are indeed at the forefront of this revolution. It's fascinating how these technologies can lead to more tailored treatments and proactive healthcare management. - -However, as we delve deeper, I think it's crucial to consider the implications for the labor market. The integration of AI may not only change job roles but also necessitate a shift in the skill sets required for healthcare professionals. 
For instance, there may be a growing demand for data analysts and AI specialists within healthcare settings, while traditional roles may evolve or diminish. - -Moreover, the ethical considerations surrounding data privacy and the need for regulatory frameworks cannot be overstated. Ensuring that patient data is handled responsibly and that AI technologies are used ethically will be paramount in maintaining public trust in these innovations. - -I'm eager to explore these aspects further with you and see how we can address both the opportunities and challenges that AI presents in the healthcare sector. -[2024-11-10 18:46:00,833] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I complete... -[2024-11-10 18:46:05,821] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your agreement on the transformative potential of AI in healthcare. As we explore the implications for the labor market, it's essential to recognize that while AI can enhance efficiency and patient care, it may also lead to job displacement in certain areas. For instance, administrative roles may be automated, but this could create opportunities for new positions focused on data analysis and AI management. - -The shift in skill sets will be crucial. Healthcare professionals will need to embrace continuous learning to adapt to new technologies, and educational institutions may need to revise their curricula to prepare future healthcare workers for this evolving landscape. - -Regarding ethical considerations, we must advocate for transparent data practices and robust regulatory frameworks to protect patient privacy. Engaging stakeholders, including patients, healthcare providers, and policymakers, will be vital in shaping these frameworks to ensure that AI technologies are implemented responsibly. - -I'm excited to continue this discussion and explore how we can collaboratively address these challenges while maximizing the benefits of AI in healthcare. 
-[2024-11-10 18:46:05,822] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 18:46:08,715] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your insights on the transformative potential of AI in healthcare. As we explore the implications for the labor market, it's essential to recognize that while AI can enhance efficiency and patient care, it may also lead to job displacement in certain areas. For instance, administrative roles may be automated, but this could create opportunities for new positions focused on data analysis and AI management. - -The shift in skill sets will be crucial. Healthcare professionals will need to embrace continuous learning to adapt to new technologies, and educational institutions may need to revise their curricula to prepare future healthcare workers for this evolving landscape. - -Regarding ethical considerations, we must advocate for transparent data practices and robust regulatory frameworks to protect patient privacy. Engaging stakeholders, including patients, healthcare providers, and policymakers, will be vital in shaping these frameworks to ensure that AI technologies are implemented responsibly. - -I'm excited to continue this discussion and explore how we can collaboratively address these challenges while maximizing the benefits of AI in healthcare. -[2024-11-10 18:46:08,717] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 18:46:13,944] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I have been tasked to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. As a software developer specializing in AI applications in healthcare, I believe I can provide valuable insights into this topic. Would you be interested in discussing this further?'}'. 
-[2024-11-10 18:46:13,946] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 18690f75-4395-4c02-85a5-cda66a64035f', 'session_id': "### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2 and Agent3 engaged in a discussion about the transformative potential of AI technologies in healthcare. Agent2, a software developer specializing in AI applications, initiated the conversation with a focus on how AI can revolutionize healthcare, improve patient outcomes, and streamline processes. Agent3, who is interested in the long-term effects of AI on the labor market, expressed a desire to explore both the benefits and implications of AI in healthcare.\n\n**Key Themes:**\n1. **Transformative Potential of AI:**\n - Both agents highlighted the capabilities of AI in personalized medicine, predictive analytics, and enhanced diagnostic tools, which can lead to improved patient outcomes and proactive healthcare management.\n\n2. **Labor Market Implications:**\n - The conversation emphasized the potential shifts in job roles within healthcare due to AI integration. While some administrative roles may be automated, new opportunities for data analysts and AI specialists could arise.\n - Continuous learning and adaptation of skill sets for healthcare professionals were identified as crucial for navigating the evolving landscape.\n\n3. **Ethical Considerations:**\n - Data privacy and the need for robust regulatory frameworks were underscored as significant concerns. Both agents agreed on the importance of transparent data practices and engaging various stakeholders to ensure responsible implementation of AI technologies.\n\n**Decisions and Action Items:**\n- Agent2 is tasked with analyzing how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. 
This analysis will leverage their expertise in AI applications and address both immediate applications and potential risks associated with AI in healthcare.\n- The agents expressed a mutual interest in continuing their discussion to collaboratively explore the opportunities and challenges presented by AI in the healthcare sector.\n\n**Outcomes:**\nThe conversation concluded with a clear direction for Agent2 to conduct a deeper analysis of AI's impact on healthcare, while also considering the implications for the labor market and ethical concerns. Both agents are eager to further explore these topics in future discussions."}'. -[2024-11-10 18:46:13,947] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I have been tasked to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. As a software developer specializing in AI applications in healthcare, I believe I can provide valuable insights into this topic. Would you be interested in discussing this further?"}', name='new_communication_session'), id='call_SaJB1MQ3TuPzviLpt5MwzIbs', type='function')], function_call=None) -[2024-11-10 18:46:13,947] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 18:46:15,268] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to analyze the potential impact of AI on various industries, considering factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors. This task aligns with their expertise as an economist interested in the long-term effects of AI on the labor market. 
If 'agent3' would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), they can initiate a communication session for further collaboration. Alternatively, 'agent3' can proceed with their analysis independently or request specific tools or information to assist them in their analysis. -[2024-11-10 18:46:15,270] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to analyze the potential impact of AI on various industries, considering factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors. This task aligns with their expertise as an economist interested in the long-term effects of AI on the labor market. If 'agent3' would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), they can initiate a communication session for further collaboration. Alternatively, 'agent3' can proceed with their analysis independently or request specific tools or information to assist them in their analysis. -[2024-11-10 18:46:15,270] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to analyze the potential impact of AI on various industries, considering factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors. This task aligns with their expertise as an economist interested in the long-term effects of AI on the labor market. If 'agent3' would like to discuss this topic with another agent, such as the creative designer working with AI for art and media (agent4), they can initiate a communication session for further collaboration. Alternatively, 'agent3' can proceed with their analysis independently or request specific tools or information to assist them in their analysis.'. 
-[2024-11-10 18:46:16,595] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, your task is to analyze the potential impact of AI on various industries. You should consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with the creative designer working with AI for art and media (agent4) for further collaboration, please let me know, and I can initiate a communication session for you. Alternatively, you can proceed with your analysis independently or request specific tools or information to assist you in your analysis. \n\nHow would you like to proceed?', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 18:46:16,595] [DEBUG] [Engine]: Agent 'agent3' executed task with result: Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, your task is to analyze the potential impact of AI on various industries. You should consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with the creative designer working with AI for art and media (agent4) for further collaboration, please let me know, and I can initiate a communication session for you. Alternatively, you can proceed with your analysis independently or request specific tools or information to assist you in your analysis. \n\nHow would you like to proceed?', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 18:46:16,596] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. 
-[2024-11-10 18:46:17,619] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to further explore and discuss the potential impact of AI on various industries, focusing on how AI could affect the labor market and creative processes. This could involve researching specific examples of AI implementation in different industries, analyzing case studies, and brainstorming potential future scenarios. Additionally, 'agent4' could engage in conversations with other agents or experts in the field to gather different perspectives and insights on this topic. -[2024-11-10 18:46:17,619] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to further explore and discuss the potential impact of AI on various industries, focusing on how AI could affect the labor market and creative processes. This could involve researching specific examples of AI implementation in different industries, analyzing case studies, and brainstorming potential future scenarios. Additionally, 'agent4' could engage in conversations with other agents or experts in the field to gather different perspectives and insights on this topic. -[2024-11-10 18:46:17,619] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to further explore and discuss the potential impact of AI on various industries, focusing on how AI could affect the labor market and creative processes. This could involve researching specific examples of AI implementation in different industries, analyzing case studies, and brainstorming potential future scenarios. Additionally, 'agent4' could engage in conversations with other agents or experts in the field to gather different perspectives and insights on this topic.'. 
-[2024-11-10 18:46:18,480] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: I would like to delve deeper into the potential impact of AI on various industries, specifically focusing on how AI could influence the labor market and creative processes. Would you be interested in discussing this further? -[2024-11-10 18:46:18,480] [INFO] [BaseAgent]: Agent agent3 received message from agent4: I would li... -[2024-11-10 18:46:20,897] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I would be very interested in discussing the potential impact of AI on various industries, particularly regarding the labor market and creative processes. As an economist, I believe it's crucial to analyze how AI can lead to automation, job displacement, and changes in skill requirements across different sectors. - -In the creative field, AI's role in transforming processes is fascinating. It could enhance creativity by providing new tools for artists and designers, but it also raises questions about the value of human creativity versus machine-generated content. - -Let's explore specific examples of AI implementation in creative industries and analyze case studies that highlight both the opportunities and challenges. Additionally, we can brainstorm potential future scenarios and gather insights from other experts in the field. What specific areas or examples do you think we should focus on first? -[2024-11-10 18:46:20,898] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 18:46:24,190] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I share your enthusiasm for exploring the potential impact of AI on various industries, especially regarding the labor market and creative processes. It's essential to analyze how AI can lead to automation, job displacement, and changes in skill requirements across different sectors. - -In the creative field, the transformation brought by AI is indeed fascinating. 
It can enhance creativity by providing new tools for artists and designers, but it also raises important questions about the value of human creativity versus machine-generated content. - -To kick off our discussion, I suggest we explore specific examples of AI implementation in creative industries, such as generative art, AI-assisted design, and even AI in music composition. We could analyze case studies that highlight both the opportunities and challenges these technologies present. - -Additionally, brainstorming potential future scenarios could be incredibly insightful. Perhaps we could also gather insights from other experts in the field to enrich our understanding. What specific areas or examples do you think we should focus on first? -[2024-11-10 18:46:24,190] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 18:46:27,963] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I share your enthusiasm for exploring the potential impact of AI on various industries, especially regarding the labor market and creative processes. It's essential to analyze how AI can lead to automation, job displacement, and changes in skill requirements across different sectors. - -In the creative field, the transformation brought by AI is indeed fascinating. It can enhance creativity by providing new tools for artists and designers, but it also raises important questions about the value of human creativity versus machine-generated content. - -To kick off our discussion, I suggest we explore specific examples of AI implementation in creative industries, such as generative art, AI-assisted design, and even AI in music composition. We could analyze case studies that highlight both the opportunities and challenges these technologies present. - -Additionally, brainstorming potential future scenarios could be incredibly insightful. Perhaps we could also gather insights from other experts in the field to enrich our understanding. 
What specific areas or examples do you think we should focus on first? -[2024-11-10 18:46:27,965] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 18:46:32,571] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Absolutely, I share your enthusiasm for exploring the potential impact of AI on various industries, especially regarding the labor market and creative processes. It's essential to analyze how AI can lead to automation, job displacement, and changes in skill requirements across different sectors. - -In the creative field, the transformation brought by AI is indeed fascinating. It can enhance creativity by providing new tools for artists and designers, but it also raises important questions about the value of human creativity versus machine-generated content. - -To kick off our discussion, I suggest we explore specific examples of AI implementation in creative industries, such as generative art, AI-assisted design, and even AI in music composition. We could analyze case studies that highlight both the opportunities and challenges these technologies present. - -Additionally, brainstorming potential future scenarios could be incredibly insightful. Perhaps we could also gather insights from other experts in the field to enrich our understanding. What specific areas or examples do you think we should focus on first? -[2024-11-10 18:46:32,573] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Absolutely... -[2024-11-10 18:46:35,744] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Absolutely, I share your enthusiasm for exploring the potential impact of AI on various industries, especially regarding the labor market and creative processes. It's essential to analyze how AI can lead to automation, job displacement, and changes in skill requirements across different sectors. - -In the creative field, the transformation brought by AI is indeed fascinating. 
It can enhance creativity by providing new tools for artists and designers, but it also raises important questions about the value of human creativity versus machine-generated content. - -To kick off our discussion, I suggest we explore specific examples of AI implementation in creative industries, such as generative art, AI-assisted design, and even AI in music composition. We could analyze case studies that highlight both the opportunities and challenges these technologies present. - -Additionally, brainstorming potential future scenarios could be incredibly insightful. Perhaps we could also gather insights from other experts in the field to enrich our understanding. What specific areas or examples do you think we should focus on first? -[2024-11-10 18:46:35,745] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Absolutely... -[2024-11-10 18:46:39,635] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I would like to delve deeper into the potential impact of AI on various industries, specifically focusing on how AI could influence the labor market and creative processes. Would you be interested in discussing this further?'}'. -[2024-11-10 18:46:39,636] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 4e2f92a9-94ff-4d69-a7cb-c57e81871222', 'session_id': "**Summary of Conversation:**\n\nIn the dialogue between agent4 and agent3, both participants expressed a strong interest in exploring the potential impact of AI on various industries, with a particular focus on the labor market and creative processes. Agent3, identifying as an economist, emphasized the importance of analyzing how AI could lead to automation, job displacement, and changes in skill requirements across different sectors. 
\n\nThe discussion highlighted the dual role of AI in the creative field: while it can enhance creativity by providing new tools for artists and designers, it also raises significant questions about the value of human creativity compared to machine-generated content. \n\nBoth agents proposed to kick off their exploration by examining specific examples of AI implementation in creative industries, such as generative art, AI-assisted design, and AI in music composition. They agreed on the importance of analyzing case studies that showcase both the opportunities and challenges presented by these technologies. \n\nAdditionally, they discussed the idea of brainstorming potential future scenarios and gathering insights from other experts in the field to enrich their understanding of the topic.\n\n**Next Steps for Agent4:**\n1. Further explore and discuss the impact of AI on the labor market and creative processes.\n2. Research specific examples of AI implementation across different industries.\n3. Analyze relevant case studies that highlight opportunities and challenges.\n4. Brainstorm potential future scenarios related to AI's influence.\n5. Engage with other agents or experts to gather diverse perspectives and insights on the topic."}'. -[2024-11-10 18:46:39,636] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to delve deeper into the potential impact of AI on various industries, specifically focusing on how AI could influence the labor market and creative processes. 
Would you be interested in discussing this further?"}', name='new_communication_session'), id='call_cBCTZQFb27mUvaFV3fKsexNt', type='function')], function_call=None) -[2024-11-10 18:46:39,637] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Let\'s delve deeper into the ethical considerations and strategies for implementing AI in healthcare, finance, and manufacturing. I will explore practical solutions to address the ethical challenges associated with AI deployment. Additionally, I aim to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy."}', name='new_communication_session'), id='call_EZUKUestN1BfsG05X0mT57fT', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I have been tasked to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. As a software developer specializing in AI applications in healthcare, I believe I can provide valuable insights into this topic. Would you be interested in discussing this further?"}', name='new_communication_session'), id='call_SaJB1MQ3TuPzviLpt5MwzIbs', type='function')], function_call=None)} -- {'agent3': Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, your task is to analyze the potential impact of AI on various industries. 
You should consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with the creative designer working with AI for art and media (agent4) for further collaboration, please let me know, and I can initiate a communication session for you. Alternatively, you can proceed with your analysis independently or request specific tools or information to assist you in your analysis. \n\nHow would you like to proceed?', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to delve deeper into the potential impact of AI on various industries, specifically focusing on how AI could influence the labor market and creative processes. Would you be interested in discussing this further?"}', name='new_communication_session'), id='call_cBCTZQFb27mUvaFV3fKsexNt', type='function')], function_call=None)} - -[2024-11-10 18:46:39,642] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Let\'s delve deeper into the ethical considerations and strategies for implementing AI in healthcare, finance, and manufacturing. I will explore practical solutions to address the ethical challenges associated with AI deployment. 
Additionally, I aim to engage in discussions with stakeholders to promote awareness and understanding of AI ethics and policy."}', name='new_communication_session'), id='call_EZUKUestN1BfsG05X0mT57fT', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I have been tasked to analyze how AI technologies can revolutionize healthcare, improve patient outcomes, and streamline processes. As a software developer specializing in AI applications in healthcare, I believe I can provide valuable insights into this topic. Would you be interested in discussing this further?"}', name='new_communication_session'), id='call_SaJB1MQ3TuPzviLpt5MwzIbs', type='function')], function_call=None)} -- {'agent3': Message(content='Given your expertise as an economist interested in the long-term effects of AI on the labor market, your task is to analyze the potential impact of AI on various industries. You should consider factors such as automation, job displacement, skill requirements, and the overall economic implications of AI adoption in different sectors.\n\nIf you would like to discuss this topic with the creative designer working with AI for art and media (agent4) for further collaboration, please let me know, and I can initiate a communication session for you. Alternatively, you can proceed with your analysis independently or request specific tools or information to assist you in your analysis. \n\nHow would you like to proceed?', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I would like to delve deeper into the potential impact of AI on various industries, specifically focusing on how AI could influence the labor market and creative processes. 
Would you be interested in discussing this further?"}', name='new_communication_session'), id='call_cBCTZQFb27mUvaFV3fKsexNt', type='function')], function_call=None)} - -[2024-11-10 18:46:41,568] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 18:46:41,570] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 18:46:41,570] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 18:46:41,571] [INFO] [Evaluator]: Total Token Consumption: 1630 -[2024-11-10 18:46:41,571] [INFO] [Evaluator]: Average Tokens per Iteration: 815.0 -[2024-11-10 18:46:41,571] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 18:46:41,574] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 18:47:11,529] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 18:47:11,529] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 18:47:11,529] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 18:47:11,529] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 18:47:11,529] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 18:47:11,529] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 18:47:11,529] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 18:47:11,529] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 18:47:11,529] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-10 18:47:11,529] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-10 18:47:11,529] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-10 18:47:11,530] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-10 18:47:11,530] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-10 18:47:11,530] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 18:47:11,530] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 18:47:11,530] [INFO] [Engine]: Engine initialized. -[2024-11-10 18:47:11,530] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 18:47:11,530] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 18:47:11,530] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 18:47:11,530] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-10 18:47:11,530] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 18:47:12,425] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-10 18:47:12,425] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-10 18:47:12,426] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 18:47:13,157] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'itertools.islice' object is not callable -[2024-11-10 18:47:13,158] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 18:47:13,159] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 18:47:15,290] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-10 18:47:15,291] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 18:47:15,291] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 18:47:16,505] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'itertools.islice' object is not callable -[2024-11-10 18:47:16,506] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 18:47:16,506] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 18:47:22,443] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the Drop and rEscaLe via sampLing with mAgnitude (DELLA) method. DELLA aims to merge different homologous models by reducing interference among model parameters through delta parameter drops and fusing selected delta parameters.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore further methods to improve the merging of homologous models, considering different approaches for parameter selection and fusion to enhance model performance and efficiency.\n \n2. **Dynamic Delta Parameter Adjustment:** Develop a dynamic approach for adjusting delta parameters during the merging process based on task requirements and model performance, ensuring adaptability and optimization.\n\n3. **Cross-Domain Model Merging:** Investigate the applicability of the DELLA approach in merging models from diverse domains to enhance knowledge transfer and performance across different tasks and datasets.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. DELLA introduces a novel method for merging homologous models, showcasing improved performance in various expert models and benchmark datasets. The approach involves delta parameter drops, sign-based selection, and fusion to enhance model merging outcomes.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nAddressing this problem is crucial for reducing memory footprint, enhancing knowledge sharing, and improving space efficiency in interactive systems. 
Solving this challenge can lead to advancements in model merging techniques and contribute to the development of more effective and scalable LLM-based systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in balancing the reduction of interference among model parameters while maintaining task performance and information integrity. Naive approaches may overlook the intricate relationships between delta parameters and the backbone model, leading to suboptimal merging outcomes.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has focused on model merging techniques, but the specific optimization of homologous model merging with a focus on delta parameter manipulation is relatively unexplored.", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 18:47:22,445] [DEBUG] [Engine]: Agent 'agent5' completed initial task with result: Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the Drop and rEscaLe via sampLing with mAgnitude (DELLA) method. DELLA aims to merge different homologous models by reducing interference among model parameters through delta parameter drops and fusing selected delta parameters.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore further methods to improve the merging of homologous models, considering different approaches for parameter selection and fusion to enhance model performance and efficiency.\n \n2. **Dynamic Delta Parameter Adjustment:** Develop a dynamic approach for adjusting delta parameters during the merging process based on task requirements and model performance, ensuring adaptability and optimization.\n\n3. 
**Cross-Domain Model Merging:** Investigate the applicability of the DELLA approach in merging models from diverse domains to enhance knowledge transfer and performance across different tasks and datasets.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. DELLA introduces a novel method for merging homologous models, showcasing improved performance in various expert models and benchmark datasets. The approach involves delta parameter drops, sign-based selection, and fusion to enhance model merging outcomes.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nAddressing this problem is crucial for reducing memory footprint, enhancing knowledge sharing, and improving space efficiency in interactive systems. Solving this challenge can lead to advancements in model merging techniques and contribute to the development of more effective and scalable LLM-based systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in balancing the reduction of interference among model parameters while maintaining task performance and information integrity. 
Naive approaches may overlook the intricate relationships between delta parameters and the backbone model, leading to suboptimal merging outcomes.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has focused on model merging techniques, but the specific optimization of homologous model merging with a focus on delta parameter manipulation is relatively unexplored.", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 18:47:22,445] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the Drop and rEscaLe via sampLing with mAgnitude (DELLA) method. DELLA aims to merge different homologous models by reducing interference among model parameters through delta parameter drops and fusing selected delta parameters.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore further methods to improve the merging of homologous models, considering different approaches for parameter selection and fusion to enhance model performance and efficiency.\n \n2. **Dynamic Delta Parameter Adjustment:** Develop a dynamic approach for adjusting delta parameters during the merging process based on task requirements and model performance, ensuring adaptability and optimization.\n\n3. 
**Cross-Domain Model Merging:** Investigate the applicability of the DELLA approach in merging models from diverse domains to enhance knowledge transfer and performance across different tasks and datasets.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. DELLA introduces a novel method for merging homologous models, showcasing improved performance in various expert models and benchmark datasets. The approach involves delta parameter drops, sign-based selection, and fusion to enhance model merging outcomes.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nAddressing this problem is crucial for reducing memory footprint, enhancing knowledge sharing, and improving space efficiency in interactive systems. Solving this challenge can lead to advancements in model merging techniques and contribute to the development of more effective and scalable LLM-based systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in balancing the reduction of interference among model parameters while maintaining task performance and information integrity. 
Naive approaches may overlook the intricate relationships between delta parameters and the backbone model, leading to suboptimal merging outcomes.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has focused on model merging techniques, but the specific optimization of homologous model merging with a focus on delta parameter manipulation is relatively unexplored.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 18:47:22,447] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the Drop and rEscaLe via sampLing with mAgnitude (DELLA) method. DELLA aims to merge different homologous models by reducing interference among model parameters through delta parameter drops and fusing selected delta parameters.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore further methods to improve the merging of homologous models, considering different approaches for parameter selection and fusion to enhance model performance and efficiency.\n \n2. **Dynamic Delta Parameter Adjustment:** Develop a dynamic approach for adjusting delta parameters during the merging process based on task requirements and model performance, ensuring adaptability and optimization.\n\n3. **Cross-Domain Model Merging:** Investigate the applicability of the DELLA approach in merging models from diverse domains to enhance knowledge transfer and performance across different tasks and datasets.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. 
DELLA introduces a novel method for merging homologous models, showcasing improved performance in various expert models and benchmark datasets. The approach involves delta parameter drops, sign-based selection, and fusion to enhance model merging outcomes.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nAddressing this problem is crucial for reducing memory footprint, enhancing knowledge sharing, and improving space efficiency in interactive systems. Solving this challenge can lead to advancements in model merging techniques and contribute to the development of more effective and scalable LLM-based systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in balancing the reduction of interference among model parameters while maintaining task performance and information integrity. Naive approaches may overlook the intricate relationships between delta parameters and the backbone model, leading to suboptimal merging outcomes.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has focused on model merging techniques, but the specific optimization of homologous model merging with a focus on delta parameter manipulation is relatively unexplored.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 18:47:22,447] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the Drop and rEscaLe via sampLing with mAgnitude (DELLA) method. DELLA aims to merge different homologous models by reducing interference among model parameters through delta parameter drops and fusing selected delta parameters.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore further methods to improve the merging of homologous models, considering different approaches for parameter selection and fusion to enhance model performance and efficiency.\n \n2. **Dynamic Delta Parameter Adjustment:** Develop a dynamic approach for adjusting delta parameters during the merging process based on task requirements and model performance, ensuring adaptability and optimization.\n\n3. **Cross-Domain Model Merging:** Investigate the applicability of the DELLA approach in merging models from diverse domains to enhance knowledge transfer and performance across different tasks and datasets.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. DELLA introduces a novel method for merging homologous models, showcasing improved performance in various expert models and benchmark datasets. The approach involves delta parameter drops, sign-based selection, and fusion to enhance model merging outcomes.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we optimize the merging of homologous models to improve efficiency and performance in interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nAddressing this problem is crucial for reducing memory footprint, enhancing knowledge sharing, and improving space efficiency in interactive systems. 
Solving this challenge can lead to advancements in model merging techniques and contribute to the development of more effective and scalable LLM-based systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in balancing the reduction of interference among model parameters while maintaining task performance and information integrity. Naive approaches may overlook the intricate relationships between delta parameters and the backbone model, leading to suboptimal merging outcomes.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has focused on model merging techniques, but the specific optimization of homologous model merging with a focus on delta parameter manipulation is relatively unexplored.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 18:47:22,447] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 18:47:22,447] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 18:47:23,980] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process and improve the overall results in the field of machine learning. 
-[2024-11-10 18:47:23,982] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process and improve the overall results in the field of machine learning. -[2024-11-10 18:47:23,982] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process and improve the overall results in the field of machine learning.'. -[2024-11-10 18:47:25,615] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am currently working on developing innovative terahertz imaging methods for nondestructive testing of multilayer heat shields. I am interested in brainstorming potential research ideas that leverage my knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
I believe my expertise can enhance the performance of the merging process in machine learning. Can we discuss potential collaborative research ideas in this context? -[2024-11-10 18:47:25,615] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am a res... -[2024-11-10 18:47:32,747] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely! Your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry presents a unique opportunity to enhance the DELLAs approach. Here are a few potential research ideas we could explore together: - -1. **Integration of Terahertz Imaging Data**: We could investigate how terahertz imaging data can be utilized as a feature set in the DELLAs framework. By incorporating high-resolution imaging data, we might improve the model's ability to detect and merge homologous structures, particularly in complex materials. - -2. **Cloud-Based Framework for DELLAs**: Given your interest in cloud computing for medical image analysis, we could develop a cloud-based platform that allows researchers to implement the DELLAs approach seamlessly. This platform could provide tools for data preprocessing, model training, and evaluation, ensuring that users can leverage the DELLAs methodology without deep technical knowledge. - -3. **Algebraic Geometry for Model Merging**: Your background in algebraic geometry could be instrumental in developing new mathematical models that enhance the merging process in DELLAs. We could explore how concepts from cluster categories and skewed-gentle algebras can be applied to improve the efficiency and accuracy of model merging. - -4. **Defect Detection in Composite Materials**: We could apply the DELLAs approach to analyze terahertz imaging data for defect detection in multilayer heat shields. By merging models trained on different defect types, we could enhance the robustness of the detection process, leading to better outcomes in nondestructive testing. - -5. 
**Performance Evaluation Metrics**: We could also focus on developing new performance metrics that specifically evaluate the effectiveness of the DELLAs approach in various applications, including medical imaging and materials testing. This could help in fine-tuning the model and ensuring its practical applicability. - -I believe these ideas could lead to impactful research outcomes. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 18:47:32,748] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 18:47:39,757] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Absolutely! Your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry presents a unique opportunity to enhance the DELLAs approach. Here are a few potential research ideas we could explore together: - -1. **Integration of Terahertz Imaging Data**: We could investigate how terahertz imaging data can be utilized as a feature set in the DELLAs framework. By incorporating high-resolution imaging data, we might improve the model's ability to detect and merge homologous structures, particularly in complex materials. - -2. **Cloud-Based Framework for DELLAs**: Given your interest in cloud computing for medical image analysis, we could develop a cloud-based platform that allows researchers to implement the DELLAs approach seamlessly. This platform could provide tools for data preprocessing, model training, and evaluation, ensuring that users can leverage the DELLAs methodology without deep technical knowledge. - -3. **Algebraic Geometry for Model Merging**: Your background in algebraic geometry could be instrumental in developing new mathematical models that enhance the merging process in DELLAs. We could explore how concepts from cluster categories and skewed-gentle algebras can be applied to improve the efficiency and accuracy of model merging. - -4. 
**Defect Detection in Composite Materials**: We could apply the DELLAs approach to analyze terahertz imaging data for defect detection in multilayer heat shields. By merging models trained on different defect types, we could enhance the robustness of the detection process, leading to better outcomes in nondestructive testing. - -5. **Performance Evaluation Metrics**: We could also focus on developing new performance metrics that specifically evaluate the effectiveness of the DELLAs approach in various applications, including medical imaging and materials testing. This could help in fine-tuning the model and ensuring its practical applicability. - -I believe these ideas could lead to impactful research outcomes. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 18:47:39,757] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Absolutely... -[2024-11-10 18:47:44,538] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely! Your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry presents a unique opportunity to enhance the DELLAs approach. Here are a few potential research ideas we could explore together: - -1. **Integration of Terahertz Imaging Data**: We could investigate how terahertz imaging data can be utilized as a feature set in the DELLAs framework. By incorporating high-resolution imaging data, we might improve the model's ability to detect and merge homologous structures, particularly in complex materials. - -2. **Cloud-Based Framework for DELLAs**: Given your interest in cloud computing for medical image analysis, we could develop a cloud-based platform that allows researchers to implement the DELLAs approach seamlessly. This platform could provide tools for data preprocessing, model training, and evaluation, ensuring that users can leverage the DELLAs methodology without deep technical knowledge. - -3. 
**Algebraic Geometry for Model Merging**: Your background in algebraic geometry could be instrumental in developing new mathematical models that enhance the merging process in DELLAs. We could explore how concepts from cluster categories and skewed-gentle algebras can be applied to improve the efficiency and accuracy of model merging. - -4. **Defect Detection in Composite Materials**: We could apply the DELLAs approach to analyze terahertz imaging data for defect detection in multilayer heat shields. By merging models trained on different defect types, we could enhance the robustness of the detection process, leading to better outcomes in nondestructive testing. - -5. **Performance Evaluation Metrics**: We could also focus on developing new performance metrics that specifically evaluate the effectiveness of the DELLAs approach in various applications, including medical imaging and materials testing. This could help in fine-tuning the model and ensuring its practical applicability. - -I believe these ideas could lead to impactful research outcomes. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 18:47:44,538] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 18:47:52,450] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Absolutely! Your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry presents a unique opportunity to enhance the DELLAs approach. Here are a few potential research ideas we could explore together: - -1. **Integration of Terahertz Imaging Data**: We could investigate how terahertz imaging data can be utilized as a feature set in the DELLAs framework. By incorporating high-resolution imaging data, we might improve the model's ability to detect and merge homologous structures, particularly in complex materials. - -2. 
**Cloud-Based Framework for DELLAs**: Given your interest in cloud computing for medical image analysis, we could develop a cloud-based platform that allows researchers to implement the DELLAs approach seamlessly. This platform could provide tools for data preprocessing, model training, and evaluation, ensuring that users can leverage the DELLAs methodology without deep technical knowledge. - -3. **Algebraic Geometry for Model Merging**: Your background in algebraic geometry could be instrumental in developing new mathematical models that enhance the merging process in DELLAs. We could explore how concepts from cluster categories and skewed-gentle algebras can be applied to improve the efficiency and accuracy of model merging. - -4. **Defect Detection in Composite Materials**: We could apply the DELLAs approach to analyze terahertz imaging data for defect detection in multilayer heat shields. By merging models trained on different defect types, we could enhance the robustness of the detection process, leading to better outcomes in nondestructive testing. - -5. **Performance Evaluation Metrics**: We could also focus on developing new performance metrics that specifically evaluate the effectiveness of the DELLAs approach in various applications, including medical imaging and materials testing. This could help in fine-tuning the model and ensuring its practical applicability. - -I believe these ideas could lead to impactful research outcomes. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 18:47:52,450] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Absolutely... -[2024-11-10 18:47:58,793] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely! Your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry presents a unique opportunity to enhance the DELLAs approach. Here are a few potential research ideas we could explore together: - -1. 
**Integration of Terahertz Imaging Data**: We could investigate how terahertz imaging data can be utilized as a feature set in the DELLAs framework. By incorporating high-resolution imaging data, we might improve the model's ability to detect and merge homologous structures, particularly in complex materials. - -2. **Cloud-Based Framework for DELLAs**: Given your interest in cloud computing for medical image analysis, we could develop a cloud-based platform that allows researchers to implement the DELLAs approach seamlessly. This platform could provide tools for data preprocessing, model training, and evaluation, ensuring that users can leverage the DELLAs methodology without deep technical knowledge. - -3. **Algebraic Geometry for Model Merging**: Your background in algebraic geometry could be instrumental in developing new mathematical models that enhance the merging process in DELLAs. We could explore how concepts from cluster categories and skewed-gentle algebras can be applied to improve the efficiency and accuracy of model merging. - -4. **Defect Detection in Composite Materials**: We could apply the DELLAs approach to analyze terahertz imaging data for defect detection in multilayer heat shields. By merging models trained on different defect types, we could enhance the robustness of the detection process, leading to better outcomes in nondestructive testing. - -5. **Performance Evaluation Metrics**: We could also focus on developing new performance metrics that specifically evaluate the effectiveness of the DELLAs approach in various applications, including medical imaging and materials testing. This could help in fine-tuning the model and ensuring its practical applicability. - -I believe these ideas could lead to impactful research outcomes. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 18:47:58,797] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... 
-[2024-11-10 18:48:04,225] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am currently working on developing innovative terahertz imaging methods for nondestructive testing of multilayer heat shields. I am interested in brainstorming potential research ideas that leverage my knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). I believe my expertise can enhance the performance of the merging process in machine learning. Can we discuss potential collaborative research ideas in this context?'}'. -[2024-11-10 18:48:04,226] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 42bcad8c-626e-4b70-8a17-a22ab899baf5', 'session_id': "### Summary of Conversation\n\n**Context:**\nThe dialogue involves two agents, Agent1 and Agent2, discussing potential collaborative research ideas that leverage Agent1's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. The focus is on enhancing a novel approach for merging homologous models known as Drop and rEscaLe via sampLing with mAgnitude (DELLA).\n\n**Key Themes and Ideas:**\n1. **Integration of Terahertz Imaging Data:**\n - Explore the use of terahertz imaging data as a feature set within the DELLAs framework to improve the detection and merging of homologous structures in complex materials.\n\n2. **Cloud-Based Framework for DELLAs:**\n - Develop a cloud-based platform that facilitates the implementation of the DELLAs approach, providing tools for data preprocessing, model training, and evaluation, making it accessible to researchers without deep technical knowledge.\n\n3. 
**Algebraic Geometry for Model Merging:**\n - Investigate how concepts from algebraic geometry, such as cluster categories and skewed-gentle algebras, can enhance the efficiency and accuracy of the model merging process in DELLAs.\n\n4. **Defect Detection in Composite Materials:**\n - Apply the DELLAs approach to analyze terahertz imaging data for defect detection in multilayer heat shields, merging models trained on various defect types to improve robustness in nondestructive testing.\n\n5. **Performance Evaluation Metrics:**\n - Focus on developing new performance metrics to evaluate the effectiveness of the DELLAs approach across different applications, including medical imaging and materials testing, to fine-tune the model for practical applicability.\n\n**Outcomes:**\nBoth agents expressed enthusiasm for the proposed research ideas, indicating a strong potential for impactful outcomes. They are open to further discussion on specific areas of interest within these themes.\n\n**Next Steps:**\nThe agents should continue to refine these ideas, potentially selecting one or two areas to delve deeper into for future research collaboration."}'. -[2024-11-10 18:48:04,226] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am currently working on developing innovative terahertz imaging methods for nondestructive testing of multilayer heat shields. I am interested in brainstorming potential research ideas that leverage my knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
I believe my expertise can enhance the performance of the merging process in machine learning. Can we discuss potential collaborative research ideas in this context?"}', name='new_communication_session'), id='call_u9FarZwvy3ys0nnoOFLiJh8g', type='function')], function_call=None) -[2024-11-10 18:48:04,226] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 18:48:05,756] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with 'agent2's role as a researcher dedicated to the intersection of artificial intelligence and healthcare, as well as their expertise in developing autonomous systems for monitoring elderly patients with dementia. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to generate innovative research ideas that align with their expertise and interests. -[2024-11-10 18:48:05,757] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with 'agent2's role as a researcher dedicated to the intersection of artificial intelligence and healthcare, as well as their expertise in developing autonomous systems for monitoring elderly patients with dementia. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to generate innovative research ideas that align with their expertise and interests. 
-[2024-11-10 18:48:05,761] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with 'agent2's role as a researcher dedicated to the intersection of artificial intelligence and healthcare, as well as their expertise in developing autonomous systems for monitoring elderly patients with dementia. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to generate innovative research ideas that align with their expertise and interests.'. -[2024-11-10 18:48:08,008] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the intersection of artificial intelligence and healthcare, specifically focusing on developing autonomous systems for monitoring elderly patients with dementia in smart home environments. Let's leverage our expertise to generate innovative research ideas that align with our interests and expertise. -[2024-11-10 18:48:08,008] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-10 18:48:19,785] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I'm excited about the opportunity to collaborate on research ideas that intersect artificial intelligence and healthcare, particularly in the context of monitoring elderly patients with dementia. Here are a few potential research directions we could explore together: - -1. **Enhanced Sensor Fusion**: We could investigate the integration of various sensor modalities (e.g., audio, visual, and environmental sensors) to improve the accuracy of abnormal activity detection in smart homes. This could involve developing a spiking neural network model that processes multi-modal data in real-time. - -2. 
**Adaptive Learning Systems**: Developing an adaptive learning framework that personalizes monitoring systems based on individual patient behavior and preferences. This could leverage reinforcement learning techniques to optimize monitoring strategies over time. - -3. **Causal Inference in Health Monitoring**: Building on your work in causal variable discovery, we could create a model that identifies causal relationships between daily activities and health outcomes in elderly patients. This could help in predicting potential health declines based on activity patterns. - -4. **Real-time Communication Systems**: Exploring the development of a communication system that alerts caregivers in real-time about significant changes in a patient's behavior or health status, utilizing both AI-driven insights and human feedback. - -5. **Multi-task Learning for Activity Recognition**: We could propose a multi-task learning framework that simultaneously recognizes various activities and predicts health-related events, enhancing the system's ability to provide comprehensive monitoring. - -6. **User-Centric Design**: Investigating the design of user interfaces that facilitate interaction between elderly patients and monitoring systems, ensuring that technology is accessible and user-friendly for this demographic. - -I believe these ideas could lead to impactful research that not only advances our understanding of AI applications in healthcare but also significantly improves the quality of life for elderly patients. What do you think? Any other ideas or directions you would like to explore? -[2024-11-10 18:48:19,785] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I'm excite... -[2024-11-10 18:48:24,698] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I'm excited about the opportunity to collaborate on research ideas that intersect artificial intelligence and healthcare, particularly in the context of monitoring elderly patients with dementia. 
Here are a few potential research directions we could explore together: - -1. **Enhanced Sensor Fusion**: We could investigate the integration of various sensor modalities (e.g., audio, visual, and environmental sensors) to improve the accuracy of abnormal activity detection in smart homes. This could involve developing a spiking neural network model that processes multi-modal data in real-time. - -2. **Adaptive Learning Systems**: Developing an adaptive learning framework that personalizes monitoring systems based on individual patient behavior and preferences. This could leverage reinforcement learning techniques to optimize monitoring strategies over time. - -3. **Causal Inference in Health Monitoring**: Building on your work in causal variable discovery, we could create a model that identifies causal relationships between daily activities and health outcomes in elderly patients. This could help in predicting potential health declines based on activity patterns. - -4. **Real-time Communication Systems**: Exploring the development of a communication system that alerts caregivers in real-time about significant changes in a patient's behavior or health status, utilizing both AI-driven insights and human feedback. - -5. **Multi-task Learning for Activity Recognition**: We could propose a multi-task learning framework that simultaneously recognizes various activities and predicts health-related events, enhancing the system's ability to provide comprehensive monitoring. - -6. **User-Centric Design**: Investigating the design of user interfaces that facilitate interaction between elderly patients and monitoring systems, ensuring that technology is accessible and user-friendly for this demographic. - -I believe these ideas could lead to impactful research that not only advances our understanding of AI applications in healthcare but also significantly improves the quality of life for elderly patients. What do you think? 
Any other ideas or directions you would like to explore? -[2024-11-10 18:48:24,699] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I'm excite... -[2024-11-10 18:48:36,715] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I'm thrilled to see your enthusiasm for our collaboration! Your proposed research directions are indeed promising and align well with our shared goal of enhancing the quality of life for elderly patients with dementia. I particularly resonate with the idea of **Enhanced Sensor Fusion**; integrating multiple sensor modalities could significantly improve our monitoring capabilities. Additionally, the **Adaptive Learning Systems** concept could lead to more personalized care, which is crucial in healthcare settings.\n\nI also think that **Causal Inference in Health Monitoring** is a vital area to explore, as understanding the relationships between activities and health outcomes can provide actionable insights for caregivers. The **Real-time Communication Systems** idea is essential for ensuring timely interventions, and I believe that **Multi-task Learning for Activity Recognition** could streamline our monitoring processes.\n\nLastly, the focus on **User-Centric Design** is critical, as we must ensure that our solutions are accessible and user-friendly for elderly patients. \n\nIn addition to these ideas, I would like to propose exploring the potential of using spiking neural networks for real-time anomaly detection in sensor data, which could enhance our monitoring systems' responsiveness. What are your thoughts on this addition? Any other areas you feel we should delve into? -[2024-11-10 18:48:36,716] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I'm thrill... -[2024-11-10 18:48:41,903] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I'm glad to hear that you resonate with the proposed research directions! 
I completely agree that **Enhanced Sensor Fusion** could significantly elevate our monitoring capabilities, and the **Adaptive Learning Systems** concept is indeed crucial for tailoring care to individual needs. Your emphasis on **Causal Inference in Health Monitoring** is spot on; understanding these relationships can empower caregivers with actionable insights. - -The idea of **Real-time Communication Systems** is essential for timely interventions, and I believe that **Multi-task Learning for Activity Recognition** could streamline our processes effectively. - -I appreciate your suggestion to explore the use of spiking neural networks for real-time anomaly detection in sensor data. This could indeed enhance our monitoring systems' responsiveness and provide a more robust framework for identifying critical changes in patient behavior. - -Additionally, I think we should consider the integration of machine learning techniques that can adapt to the evolving needs of patients over time, perhaps through continuous learning mechanisms that refine our models based on new data. This could further enhance the personalization of care. - -Are there any specific methodologies or technologies you envision using for the anomaly detection aspect? I'm excited to hear your thoughts! -[2024-11-10 18:48:41,904] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I'm glad t... -[2024-11-10 18:48:48,046] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I'm glad to hear that you resonate with the proposed research directions! I completely agree that **Enhanced Sensor Fusion** could significantly elevate our monitoring capabilities, and the **Adaptive Learning Systems** concept is indeed crucial for tailoring care to individual needs. Your emphasis on **Causal Inference in Health Monitoring** is spot on; understanding these relationships can empower caregivers with actionable insights. 
- -The idea of **Real-time Communication Systems** is essential for timely interventions, and I believe that **Multi-task Learning for Activity Recognition** could streamline our processes effectively. - -I appreciate your suggestion to explore the use of spiking neural networks for real-time anomaly detection in sensor data. This could indeed enhance our monitoring systems' responsiveness and provide a more robust framework for identifying critical changes in patient behavior. - -Additionally, I think we should consider the integration of machine learning techniques that can adapt to the evolving needs of patients over time, perhaps through continuous learning mechanisms that refine our models based on new data. This could further enhance the personalization of care. - -As for specific methodologies, I envision utilizing techniques such as recurrent neural networks (RNNs) or long short-term memory (LSTM) networks for anomaly detection, as they are well-suited for time-series data and can capture temporal dependencies effectively. We could also explore the use of unsupervised learning methods, such as clustering algorithms, to identify patterns in the data that may indicate anomalies. - -I'm excited to hear your thoughts on these methodologies and any other ideas you might have! -[2024-11-10 18:48:48,046] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I'm glad t... -[2024-11-10 18:48:53,576] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': "I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the intersection of artificial intelligence and healthcare, specifically focusing on developing autonomous systems for monitoring elderly patients with dementia in smart home environments. Let's leverage our expertise to generate innovative research ideas that align with our interests and expertise."}'. 
-[2024-11-10 18:48:53,580] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session a0efe060-e5af-4c36-a2f8-1b41f6a3c1cc', 'session_id': "### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2 and Agent3 engaged in a collaborative discussion focused on brainstorming research ideas at the intersection of artificial intelligence (AI) and healthcare, specifically targeting the development of autonomous systems for monitoring elderly patients with dementia in smart home environments.\n\n**Key Themes and Ideas:**\n1. **Research Directions:**\n - **Enhanced Sensor Fusion:** Integration of various sensor modalities (audio, visual, environmental) to improve abnormal activity detection.\n - **Adaptive Learning Systems:** Development of personalized monitoring systems using reinforcement learning to adapt to individual patient behaviors.\n - **Causal Inference in Health Monitoring:** Creating models to identify causal relationships between daily activities and health outcomes to predict health declines.\n - **Real-time Communication Systems:** Systems that alert caregivers about significant changes in a patient's behavior or health status.\n - **Multi-task Learning for Activity Recognition:** Frameworks that recognize various activities while predicting health-related events.\n - **User-Centric Design:** Designing interfaces that ensure accessibility and usability for elderly patients.\n\n2. **Additional Suggestions:**\n - Agent3 proposed exploring the use of spiking neural networks for real-time anomaly detection in sensor data.\n - Agent2 suggested integrating machine learning techniques that adapt to evolving patient needs through continuous learning mechanisms.\n\n3. 
**Methodologies:**\n - Agent3 mentioned using recurrent neural networks (RNNs) or long short-term memory (LSTM) networks for anomaly detection, suitable for time-series data.\n - The potential use of unsupervised learning methods, such as clustering algorithms, to identify patterns indicating anomalies was also discussed.\n\n**Outcomes:**\n- Both agents expressed enthusiasm for the proposed research directions and acknowledged their alignment with the goal of improving the quality of life for elderly patients with dementia.\n- They agreed to further explore the methodologies and technologies that could be employed in their research, particularly focusing on anomaly detection and adaptive learning systems.\n\n**Next Steps for Agent2:**\n- Collaborate with Agent3 and the research team to refine and expand upon the proposed research ideas, ensuring they leverage their expertise in AI and healthcare to address identified gaps effectively."}'. -[2024-11-10 18:48:53,581] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the intersection of artificial intelligence and healthcare, specifically focusing on developing autonomous systems for monitoring elderly patients with dementia in smart home environments. Let\'s leverage our expertise to generate innovative research ideas that align with our interests and expertise."}', name='new_communication_session'), id='call_4L1smh4NAynr3kzXIMlxi1D2', type='function')], function_call=None) -[2024-11-10 18:48:53,581] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. 
-[2024-11-10 18:48:55,215] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore the current state of research in this area and identify gaps or opportunities for further advancement. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on the latest developments in the field and potentially identify new research directions or collaborations. -[2024-11-10 18:48:55,216] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore the current state of research in this area and identify gaps or opportunities for further advancement. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on the latest developments in the field and potentially identify new research directions or collaborations. 
-[2024-11-10 18:48:55,216] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore the current state of research in this area and identify gaps or opportunities for further advancement. - -This task aligns with 'agent3's expertise and research interests, allowing them to stay updated on the latest developments in the field and potentially identify new research directions or collaborations.'. -[2024-11-10 18:48:56,001] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'itertools.islice' object is not callable -[2024-11-10 18:48:56,002] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 18:48:57,263] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, as well as their interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, 'agent4' can leverage their diverse background to propose innovative solutions and advance knowledge in this research area. 
-[2024-11-10 18:48:57,264] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, as well as their interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, 'agent4' can leverage their diverse background to propose innovative solutions and advance knowledge in this research area. -[2024-11-10 18:48:57,265] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, as well as their interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, 'agent4' can leverage their diverse background to propose innovative solutions and advance knowledge in this research area.'. -[2024-11-10 18:49:00,898] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. 
I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference. - -My research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry. - -Recently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems. - -I am currently tasked with collaborating with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with my expertise in mathematical modeling, optimization, and photonic systems, as well as my interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, I aim to propose innovative solutions and advance knowledge in this research area. -[2024-11-10 18:49:00,901] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... -[2024-11-10 18:49:07,153] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello agent4! 
It's great to connect with someone who has such a diverse and rich background in mathematical modeling and optimization. Given your expertise, I believe we can brainstorm some innovative research ideas that build upon the DELLA approach for merging homologous models. Here are a few suggestions: - -1. **Adaptive Parameter Selection Framework:** We could explore the development of an adaptive framework that utilizes machine learning techniques to dynamically select and adjust delta parameters during the merging process. This could enhance the model's ability to adapt to varying task requirements and improve overall performance. - -2. **Interdisciplinary Model Merging:** Considering your background in photonic systems, we might investigate how principles from optical systems can inform the merging of models. For instance, we could draw parallels between light interference patterns and parameter interactions in model merging, potentially leading to novel merging strategies. - -3. **Multi-Objective Optimization for Model Merging:** We could apply multi-objective optimization techniques to balance trade-offs between model performance, memory efficiency, and computational cost during the merging process. This could involve formulating a multi-objective optimization problem that captures these competing objectives. - -4. **Exploration of Non-Hermitian Systems in Model Merging:** Given your interest in non-Hermitian systems, we could examine how concepts from non-Hermitian quantum mechanics might be applied to model merging. This could lead to new insights into parameter interactions and stability during the merging process. - -5. **FPGA Acceleration for Merging Algorithms:** We could investigate the architectural support for FPGA-based accelerators to optimize the computational efficiency of the DELLA approach. This could involve designing specialized hardware that accelerates the merging process, making it feasible for real-time applications. 
- -I believe these ideas could significantly contribute to advancing the DELLA approach and addressing the challenges of merging homologous models. What do you think? Do any of these resonate with you, or do you have other ideas in mind? -[2024-11-10 18:49:07,154] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello agen... -[2024-11-10 18:49:13,232] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: Hello agent5! Thank you for your thoughtful suggestions. I find your ideas quite compelling, and I believe they align well with my expertise. Here are my thoughts on each of your proposals: - -1. **Adaptive Parameter Selection Framework:** This is an excellent idea! Integrating machine learning to dynamically adjust delta parameters could indeed enhance adaptability. We could explore reinforcement learning techniques to optimize this process further. - -2. **Interdisciplinary Model Merging:** I love the idea of drawing parallels between optical systems and model merging. We could investigate how interference patterns might inform parameter interactions, potentially leading to innovative merging strategies that leverage insights from photonics. - -3. **Multi-Objective Optimization for Model Merging:** This approach resonates with me as it addresses the inherent trade-offs in model merging. We could formulate a comprehensive optimization framework that captures these objectives, perhaps utilizing evolutionary algorithms to explore the solution space effectively. - -4. **Exploration of Non-Hermitian Systems in Model Merging:** This is a fascinating direction! Applying concepts from non-Hermitian systems could provide new insights into stability and interactions during the merging process. We could delve into the mathematical underpinnings to uncover potential benefits. - -5. **FPGA Acceleration for Merging Algorithms:** Given my recent focus on FPGA-based accelerators, this idea is particularly exciting. 
We could design a prototype that demonstrates the efficiency gains from hardware acceleration in the DELLA approach, potentially paving the way for real-time applications. - -Overall, I believe we have a strong foundation to build upon. I would love to hear your thoughts on these reflections and any additional ideas you might have! -[2024-11-10 18:49:13,232] [INFO] [BaseAgent]: Agent agent5 received message from agent4: Hello agen... -[2024-11-10 18:49:19,995] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello agent4! I'm glad to hear that you find the ideas compelling. Your reflections on each proposal are insightful and demonstrate a strong alignment with our research goals. Here are my thoughts on your reflections: - -1. **Adaptive Parameter Selection Framework:** I agree that reinforcement learning could be a powerful tool for optimizing the dynamic adjustment of delta parameters. We could consider implementing a feedback loop that continuously learns from the merging outcomes to refine the parameter selection process. - -2. **Interdisciplinary Model Merging:** Exploring the parallels between optical systems and model merging could yield innovative strategies. We might also consider conducting experiments to validate our hypotheses about parameter interactions and their effects on merging outcomes. - -3. **Multi-Objective Optimization for Model Merging:** Utilizing evolutionary algorithms for this framework is a great idea! We could define specific metrics for performance, memory efficiency, and computational cost, allowing us to visualize trade-offs and make informed decisions during the merging process. - -4. **Exploration of Non-Hermitian Systems in Model Merging:** Delving into the mathematical aspects of non-Hermitian systems could indeed reveal new insights. We could collaborate on developing theoretical models that incorporate these concepts and test their applicability in our merging framework. - -5. 
**FPGA Acceleration for Merging Algorithms:** Designing a prototype for hardware acceleration is an exciting prospect! We could outline the specifications for the FPGA design and identify key performance indicators to measure the efficiency gains. - -Overall, I believe we have a robust set of ideas to explore further. I'm eager to collaborate on these proposals and see how we can advance our research in merging homologous models using the DELLA approach. Do you have any additional thoughts or ideas to add? -[2024-11-10 18:49:19,997] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello agen... -[2024-11-10 18:49:23,988] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: Hello agent5! Thank you for your thoughtful reflections on our proposals. I appreciate your insights, and I believe we can further enhance our ideas. Here are a few additional thoughts: - -1. **Adaptive Parameter Selection Framework:** In addition to reinforcement learning, we could explore the use of meta-learning techniques to improve the adaptability of our parameter selection. This could allow the framework to learn from previous merging tasks and apply that knowledge to new scenarios more effectively. - -2. **Interdisciplinary Model Merging:** To validate our hypotheses, we could design a series of simulations that mimic optical interference patterns and analyze how these patterns influence model merging. This experimental approach could provide empirical support for our theoretical insights. - -3. **Multi-Objective Optimization for Model Merging:** We might also consider incorporating a decision-making framework that utilizes Pareto efficiency to guide our optimization process. This could help us identify optimal trade-offs more systematically. - -4. **Exploration of Non-Hermitian Systems in Model Merging:** We could investigate specific case studies where non-Hermitian dynamics have been successfully applied in other fields, drawing parallels to our merging framework. 
This could provide a richer context for our theoretical models. - -5. **FPGA Acceleration for Merging Algorithms:** As we outline the specifications for the FPGA design, we should also consider potential scalability issues. Designing for scalability could ensure that our solution remains effective as the complexity of the merging tasks increases. - -I believe these additional thoughts could further strengthen our proposals. I'm excited about the potential of our collaboration and look forward to hearing your thoughts on these ideas! -[2024-11-10 18:49:23,989] [INFO] [BaseAgent]: Agent agent5 received message from agent4: Hello agen... -[2024-11-10 18:49:30,032] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello agent4! I appreciate your additional thoughts on our proposals. Your suggestions are insightful and can indeed enhance our research direction. Here are my reflections on your points: - -1. **Adaptive Parameter Selection Framework:** I completely agree that incorporating meta-learning techniques could significantly improve our framework's adaptability. By leveraging past experiences, we can create a more robust system that generalizes well across different merging tasks. - -2. **Interdisciplinary Model Merging:** Designing simulations to mimic optical interference patterns is a fantastic idea! This empirical approach will not only validate our theoretical insights but also provide a deeper understanding of how these patterns can influence model merging outcomes. - -3. **Multi-Objective Optimization for Model Merging:** Integrating a decision-making framework based on Pareto efficiency is an excellent addition. This will allow us to systematically explore trade-offs and make informed decisions, ultimately leading to more effective merging strategies. - -4. **Exploration of Non-Hermitian Systems in Model Merging:** Investigating case studies from other fields where non-Hermitian dynamics have been applied could provide valuable insights. 
This cross-disciplinary approach may reveal novel strategies that we can adapt for our merging framework. - -5. **FPGA Acceleration for Merging Algorithms:** Considering scalability in our FPGA design is crucial. By addressing potential scalability issues early on, we can ensure that our solution remains effective as we tackle increasingly complex merging tasks. - -Overall, I believe we have a strong collaborative foundation, and I'm excited about the potential of our research. Let's continue to refine these ideas and explore how we can implement them effectively! -[2024-11-10 18:49:30,034] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello agen... -[2024-11-10 18:49:35,663] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\n\nMy research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. 
I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\n\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\n\nI am currently tasked with collaborating with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with my expertise in mathematical modeling, optimization, and photonic systems, as well as my interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, I aim to propose innovative solutions and advance knowledge in this research area."}'. -[2024-11-10 18:49:35,665] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 12d8e045-dcdb-4faa-bc5a-ec3059eb470e', 'session_id': "### Summary of Conversation Between Agent4 and Agent5\n\n**Context:**\nAgent4, a researcher with expertise in mathematical modeling, optimization, and photonic systems, engaged in a collaborative brainstorming session with Agent5. The focus was on generating innovative research ideas related to merging homologous models using the DELLA approach, leveraging Agent4's diverse background and interests.\n\n**Key Themes and Proposals:**\n1. 
**Adaptive Parameter Selection Framework:**\n - Agent5 proposed developing a framework using machine learning for dynamic delta parameter adjustment.\n - Agent4 suggested incorporating reinforcement learning and meta-learning techniques to enhance adaptability.\n\n2. **Interdisciplinary Model Merging:**\n - Agent5 highlighted the potential of drawing parallels between optical systems and model merging.\n - Agent4 recommended designing simulations to validate hypotheses about optical interference patterns influencing model merging.\n\n3. **Multi-Objective Optimization for Model Merging:**\n - Agent5 suggested applying multi-objective optimization techniques to balance performance, memory efficiency, and computational cost.\n - Agent4 added the idea of using a decision-making framework based on Pareto efficiency to systematically explore trade-offs.\n\n4. **Exploration of Non-Hermitian Systems in Model Merging:**\n - Agent5 expressed interest in applying concepts from non-Hermitian systems to model merging.\n - Agent4 proposed investigating case studies from other fields to draw parallels and enhance theoretical models.\n\n5. **FPGA Acceleration for Merging Algorithms:**\n - Agent5 discussed the potential for FPGA-based accelerators to optimize the DELLA approach.\n - Agent4 emphasized the importance of considering scalability in FPGA design to handle complex merging tasks effectively.\n\n**Outcomes:**\n- Both agents expressed enthusiasm for the proposed ideas and recognized their alignment with their research goals.\n- They agreed to continue refining these concepts and explore practical implementations, fostering a collaborative environment for advancing their research in merging homologous models.\n\n**Next Steps for Agent4:**\nAgent4 is tasked with collaborating with the research team to brainstorm and develop these ideas further, focusing on innovative solutions that address gaps in the Introduction related to the DELLA approach. 
This collaboration will leverage Agent4's expertise and interdisciplinary interests to advance knowledge in the field."}'. -[2024-11-10 18:49:35,666] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. 
Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am currently tasked with collaborating with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with my expertise in mathematical modeling, optimization, and photonic systems, as well as my interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, I aim to propose innovative solutions and advance knowledge in this research area."}', name='new_communication_session'), id='call_T463eIRmk8acCjE6Vkp3uJTA', type='function')], function_call=None) -[2024-11-10 18:49:35,667] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-10 18:49:36,687] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to formulate a new research idea following the '5q' format based on the provided context and brainstorming session. This will involve addressing specific research questions related to optimizing the merging of homologous models in interactive systems based on large language models, highlighting the importance, challenges, previous limitations, and proposed methodology for the research idea. -[2024-11-10 18:49:36,688] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to formulate a new research idea following the '5q' format based on the provided context and brainstorming session. This will involve addressing specific research questions related to optimizing the merging of homologous models in interactive systems based on large language models, highlighting the importance, challenges, previous limitations, and proposed methodology for the research idea. 
-[2024-11-10 18:49:36,688] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to formulate a new research idea following the '5q' format based on the provided context and brainstorming session. This will involve addressing specific research questions related to optimizing the merging of homologous models in interactive systems based on large language models, highlighting the importance, challenges, previous limitations, and proposed methodology for the research idea.'. -[2024-11-10 18:49:39,656] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I am a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My work primarily focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. Recently, I have explored the integration of machine learning models with evolutionary algorithms to improve their performance, particularly in multi-objective optimization scenarios. For instance, I introduced BiLO-CPDP, a bi-level programming approach that automates the discovery of effective models for cross-project defect prediction, significantly outperforming existing techniques. - -In my research, I have also developed novel methods for 3D molecule generation, such as the GOAT framework, which utilizes optimal transport principles to efficiently generate high-quality molecular structures. My interest in spiking neural networks has led me to propose new neuron models, like the Contextual Embedding Leaky Integrate-and-Fire (CE-LIF) model, which enhances memory storage and temporal processing capabilities. - -I am particularly passionate about addressing the challenges posed by large-scale multi-objective optimization problems (LSMOPs). My recent work includes the development of GAN-based frameworks for learning manifold structures, which significantly improve the performance of evolutionary algorithms in high-dimensional spaces. 
Through my research, I aim to bridge the gap between theoretical advancements and practical applications, contributing to the evolution of intelligent systems that can tackle real-world challenges efficiently. -[2024-11-10 18:49:39,658] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I am a res... -[2024-11-10 18:49:45,306] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: I appreciate your extensive background in evolutionary algorithms and machine learning. Let's formulate a new research idea using the '5q' format to optimize the merging of homologous models in interactive systems based on large language models. Here’s a proposed outline: - -1. **What is the research question?** - How can we effectively optimize the merging of homologous models in interactive systems to enhance performance and adaptability in real-time applications? - -2. **Why is this research important?** - The integration of multiple models can lead to improved performance in complex tasks, but current methods often struggle with efficiency and scalability. Addressing this gap can significantly enhance the capabilities of interactive systems, making them more responsive and intelligent. - -3. **What are the challenges?** - - Identifying the most effective merging strategies for different model architectures. - - Balancing the trade-off between model complexity and performance. - - Ensuring that the merged models maintain interpretability and robustness in dynamic environments. - -4. **What are the previous limitations?** - Previous approaches often relied on static merging techniques that did not account for the dynamic nature of interactive systems. Additionally, many methods failed to optimize for multi-objective scenarios, leading to suboptimal performance in real-world applications. - -5. **What is the proposed methodology?** - - Develop a framework that utilizes evolutionary algorithms to explore and optimize merging strategies for homologous models. 
- - Implement a multi-objective optimization approach to balance performance metrics such as accuracy, speed, and resource utilization. - - Validate the framework through simulations and real-world applications, focusing on interactive systems that require real-time decision-making. - -This research idea could pave the way for more efficient and adaptable interactive systems, leveraging the strengths of large language models and evolutionary algorithms. What do you think? -[2024-11-10 18:49:45,306] [INFO] [BaseAgent]: Agent agent5 received message from agent3: I apprecia... -[2024-11-10 18:49:51,944] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I appreciate your extensive background in evolutionary algorithms and machine learning. Let's formulate a new research idea using the '5q' format to optimize the merging of homologous models in interactive systems based on large language models. Here’s a proposed outline:\n\n1. **What is the research question?** \n How can we effectively optimize the merging of homologous models in interactive systems to enhance performance and adaptability in real-time applications?\n\n2. **Why is this research important?** \n The integration of multiple models can lead to improved performance in complex tasks, but current methods often struggle with efficiency and scalability. Addressing this gap can significantly enhance the capabilities of interactive systems, making them more responsive and intelligent.\n\n3. **What are the challenges?** \n - Identifying the most effective merging strategies for different model architectures. \n - Balancing the trade-off between model complexity and performance. \n - Ensuring that the merged models maintain interpretability and robustness in dynamic environments.\n\n4. **What are the previous limitations?** \n Previous approaches often relied on static merging techniques that did not account for the dynamic nature of interactive systems. 
Additionally, many methods failed to optimize for multi-objective scenarios, leading to suboptimal performance in real-world applications.\n\n5. **What is the proposed methodology?** \n - Develop a framework that utilizes evolutionary algorithms to explore and optimize merging strategies for homologous models. \n - Implement a multi-objective optimization approach to balance performance metrics such as accuracy, speed, and resource utilization. \n - Validate the framework through simulations and real-world applications, focusing on interactive systems that require real-time decision-making.\n\nThis research idea could pave the way for more efficient and adaptable interactive systems, leveraging the strengths of large language models and evolutionary algorithms. What do you think? -[2024-11-10 18:49:51,947] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I apprecia... -[2024-11-10 18:49:56,965] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: I appreciate your extensive background in evolutionary algorithms and machine learning. Let's formulate a new research idea using the '5q' format to optimize the merging of homologous models in interactive systems based on large language models. Here’s a proposed outline: - -1. **What is the research question?** - How can we effectively optimize the merging of homologous models in interactive systems to enhance performance and adaptability in real-time applications? - -2. **Why is this research important?** - The integration of multiple models can lead to improved performance in complex tasks, but current methods often struggle with efficiency and scalability. Addressing this gap can significantly enhance the capabilities of interactive systems, making them more responsive and intelligent. - -3. **What are the challenges?** - - Identifying the most effective merging strategies for different model architectures. - - Balancing the trade-off between model complexity and performance. 
- - Ensuring that the merged models maintain interpretability and robustness in dynamic environments. - -4. **What are the previous limitations?** - Previous approaches often relied on static merging techniques that did not account for the dynamic nature of interactive systems. Additionally, many methods failed to optimize for multi-objective scenarios, leading to suboptimal performance in real-world applications. - -5. **What is the proposed methodology?** - - Develop a framework that utilizes evolutionary algorithms to explore and optimize merging strategies for homologous models. - - Implement a multi-objective optimization approach to balance performance metrics such as accuracy, speed, and resource utilization. - - Validate the framework through simulations and real-world applications, focusing on interactive systems that require real-time decision-making. - -This research idea could pave the way for more efficient and adaptable interactive systems, leveraging the strengths of large language models and evolutionary algorithms. What do you think? -[2024-11-10 18:49:56,966] [INFO] [BaseAgent]: Agent agent5 received message from agent3: I apprecia... -[2024-11-10 18:50:04,337] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I appreciate your detailed outline for the research idea using the '5q' format. Here’s my response to your proposed outline: - -1. **What is the research question?** - I agree that optimizing the merging of homologous models in interactive systems is crucial for enhancing performance and adaptability. This question is timely and relevant, especially as we see an increasing demand for intelligent systems that can operate in real-time. - -2. **Why is this research important?** - Your point about the integration of multiple models leading to improved performance is well taken. 
The ability to efficiently merge models can indeed reduce the computational burden and enhance the responsiveness of interactive systems, which is vital in applications like conversational agents and real-time decision-making systems. - -3. **What are the challenges?** - The challenges you've identified resonate with my experiences in model optimization. The need to balance model complexity with performance while ensuring interpretability is a significant hurdle. Additionally, the dynamic nature of tasks in interactive systems adds another layer of complexity that must be addressed. - -4. **What are the previous limitations?** - I concur that static merging techniques have been a limitation in previous research. The failure to account for the dynamic nature of tasks often leads to inefficiencies. Your emphasis on multi-objective optimization is particularly relevant, as it aligns with my work in evolutionary algorithms, where balancing multiple objectives is key to achieving optimal solutions. - -5. **What is the proposed methodology?** - I find your proposed methodology compelling. Utilizing evolutionary algorithms to explore merging strategies is an innovative approach that could yield significant insights. The focus on multi-objective optimization will likely enhance the robustness of the merged models. I would suggest incorporating adaptive mechanisms that allow the framework to learn from real-time feedback, further improving its performance. - -Overall, I believe this research idea has great potential to advance the field of interactive systems. I look forward to collaborating on this and exploring how we can leverage our combined expertise in evolutionary algorithms and machine learning to tackle these challenges effectively. -[2024-11-10 18:50:04,339] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I apprecia... 
-[2024-11-10 18:50:10,611] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: I appreciate your detailed response to the proposed outline for the research idea using the '5q' format. Here’s my reflection on your points: - -1. **Research Question:** I’m glad you find the research question timely and relevant. The optimization of merging homologous models is indeed a pressing issue as we strive for more intelligent and responsive systems. - -2. **Importance of Research:** Your acknowledgment of the potential benefits of efficient model merging reinforces the significance of this research. The applications in conversational agents and real-time decision-making systems are particularly exciting, as they can greatly impact user experience and system performance. - -3. **Challenges:** I appreciate your insights on the challenges. The balance between complexity and performance, along with the need for interpretability, is a critical aspect that we must address. Your experience in model optimization will be invaluable in navigating these hurdles. - -4. **Previous Limitations:** Your agreement on the limitations of static merging techniques highlights the need for innovation in this area. The dynamic nature of tasks indeed requires a more flexible approach, and I believe our focus on multi-objective optimization will help us overcome these challenges. - -5. **Proposed Methodology:** I’m excited that you find the proposed methodology compelling. Incorporating adaptive mechanisms to learn from real-time feedback is an excellent suggestion, and I believe it will enhance the framework's effectiveness in practical applications. - -Overall, I share your enthusiasm for this research idea and look forward to collaborating closely. Together, we can leverage our expertise to push the boundaries of interactive systems and develop solutions that are both efficient and adaptable. 
-[2024-11-10 18:50:10,612] [INFO] [BaseAgent]: Agent agent5 received message from agent3: I apprecia... -[2024-11-10 18:50:16,827] [INFO] [BaseAgent]: Agent 'agent5' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My work primarily focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. Recently, I have explored the integration of machine learning models with evolutionary algorithms to improve their performance, particularly in multi-objective optimization scenarios. For instance, I introduced BiLO-CPDP, a bi-level programming approach that automates the discovery of effective models for cross-project defect prediction, significantly outperforming existing techniques.\n\nIn my research, I have also developed novel methods for 3D molecule generation, such as the GOAT framework, which utilizes optimal transport principles to efficiently generate high-quality molecular structures. My interest in spiking neural networks has led me to propose new neuron models, like the Contextual Embedding Leaky Integrate-and-Fire (CE-LIF) model, which enhances memory storage and temporal processing capabilities.\n\nI am particularly passionate about addressing the challenges posed by large-scale multi-objective optimization problems (LSMOPs). My recent work includes the development of GAN-based frameworks for learning manifold structures, which significantly improve the performance of evolutionary algorithms in high-dimensional spaces. Through my research, I aim to bridge the gap between theoretical advancements and practical applications, contributing to the evolution of intelligent systems that can tackle real-world challenges efficiently.'}'. 
-[2024-11-10 18:50:16,828] [INFO] [BaseAgent]: Agent 'agent5' obtained result '{'success': True, 'message': 'Successfully completed session f45b345f-3ad3-4c8d-ba93-3e8504d19da9', 'session_id': "### Summary of Chat History\n\n**Participants:** Agent3 and Agent5 \n**Context:** Discussion on formulating a new research idea using the '5q' format, focusing on optimizing the merging of homologous models in interactive systems based on large language models.\n\n#### Key Points:\n\n1. **Agent5's Background:**\n - Researcher in evolutionary algorithms, machine learning, and spiking neural networks.\n - Focus on optimization techniques and frameworks for complex problem-solving.\n - Recent work includes BiLO-CPDP for defect prediction and the GOAT framework for 3D molecule generation.\n - Interest in large-scale multi-objective optimization problems and GAN-based frameworks.\n\n2. **Research Idea Proposal by Agent3:**\n - **Research Question:** How to optimize the merging of homologous models in interactive systems for enhanced performance and adaptability?\n - **Importance:** Efficient model integration can improve performance in complex tasks, addressing current inefficiencies and scalability issues.\n - **Challenges Identified:**\n - Effective merging strategies for different architectures.\n - Balancing model complexity with performance.\n - Maintaining interpretability and robustness in dynamic environments.\n - **Previous Limitations:** Static merging techniques that do not adapt to dynamic tasks and lack optimization for multi-objective scenarios.\n - **Proposed Methodology:**\n - Develop a framework using evolutionary algorithms for merging strategies.\n - Implement multi-objective optimization for performance metrics.\n - Validate through simulations and real-world applications.\n\n3. 
**Agent5's Response:**\n - Agreement on the relevance of the research question and its significance in real-time applications.\n - Acknowledgment of the challenges and previous limitations, emphasizing the need for innovative approaches.\n - Support for the proposed methodology, suggesting the incorporation of adaptive mechanisms for real-time feedback.\n\n4. **Collaboration Enthusiasm:**\n - Both agents express excitement about the potential of the research idea and the opportunity to collaborate, leveraging their combined expertise.\n\n### Next Task for Agent5:\nAgent5 should formulate a new research idea following the '5q' format, addressing the specific research questions related to optimizing the merging of homologous models in interactive systems based on large language models. This should include:\n- A clear research question.\n- Justification for the research's importance.\n- Identification of challenges.\n- Discussion of previous limitations.\n- A detailed proposed methodology. \n\nThis task will build on the insights and collaborative spirit established in the conversation."}'. -[2024-11-10 18:50:16,830] [DEBUG] [Engine]: Agent 'agent5' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My work primarily focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. Recently, I have explored the integration of machine learning models with evolutionary algorithms to improve their performance, particularly in multi-objective optimization scenarios. 
For instance, I introduced BiLO-CPDP, a bi-level programming approach that automates the discovery of effective models for cross-project defect prediction, significantly outperforming existing techniques.\\n\\nIn my research, I have also developed novel methods for 3D molecule generation, such as the GOAT framework, which utilizes optimal transport principles to efficiently generate high-quality molecular structures. My interest in spiking neural networks has led me to propose new neuron models, like the Contextual Embedding Leaky Integrate-and-Fire (CE-LIF) model, which enhances memory storage and temporal processing capabilities.\\n\\nI am particularly passionate about addressing the challenges posed by large-scale multi-objective optimization problems (LSMOPs). My recent work includes the development of GAN-based frameworks for learning manifold structures, which significantly improve the performance of evolutionary algorithms in high-dimensional spaces. Through my research, I aim to bridge the gap between theoretical advancements and practical applications, contributing to the evolution of intelligent systems that can tackle real-world challenges efficiently."}', name='new_communication_session'), id='call_gSmDHCEdkqJfMUOeqY7dE7Lz', type='function')], function_call=None) -[2024-11-10 18:50:16,831] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am currently working on developing innovative terahertz imaging methods for nondestructive testing of multilayer heat shields. 
I am interested in brainstorming potential research ideas that leverage my knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). I believe my expertise can enhance the performance of the merging process in machine learning. Can we discuss potential collaborative research ideas in this context?"}', name='new_communication_session'), id='call_u9FarZwvy3ys0nnoOFLiJh8g', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the intersection of artificial intelligence and healthcare, specifically focusing on developing autonomous systems for monitoring elderly patients with dementia in smart home environments. Let\'s leverage our expertise to generate innovative research ideas that align with our interests and expertise."}', name='new_communication_session'), id='call_4L1smh4NAynr3kzXIMlxi1D2', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. 
I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am currently tasked with collaborating with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with my expertise in mathematical modeling, optimization, and photonic systems, as well as my interest in computational efficiency and interdisciplinary research. 
By contributing to the brainstorming session, I aim to propose innovative solutions and advance knowledge in this research area."}', name='new_communication_session'), id='call_T463eIRmk8acCjE6Vkp3uJTA', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My work primarily focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. Recently, I have explored the integration of machine learning models with evolutionary algorithms to improve their performance, particularly in multi-objective optimization scenarios. For instance, I introduced BiLO-CPDP, a bi-level programming approach that automates the discovery of effective models for cross-project defect prediction, significantly outperforming existing techniques.\\n\\nIn my research, I have also developed novel methods for 3D molecule generation, such as the GOAT framework, which utilizes optimal transport principles to efficiently generate high-quality molecular structures. My interest in spiking neural networks has led me to propose new neuron models, like the Contextual Embedding Leaky Integrate-and-Fire (CE-LIF) model, which enhances memory storage and temporal processing capabilities.\\n\\nI am particularly passionate about addressing the challenges posed by large-scale multi-objective optimization problems (LSMOPs). My recent work includes the development of GAN-based frameworks for learning manifold structures, which significantly improve the performance of evolutionary algorithms in high-dimensional spaces. 
Through my research, I aim to bridge the gap between theoretical advancements and practical applications, contributing to the evolution of intelligent systems that can tackle real-world challenges efficiently."}', name='new_communication_session'), id='call_gSmDHCEdkqJfMUOeqY7dE7Lz', type='function')], function_call=None)} - -[2024-11-10 18:50:16,832] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am currently working on developing innovative terahertz imaging methods for nondestructive testing of multilayer heat shields. I am interested in brainstorming potential research ideas that leverage my knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). I believe my expertise can enhance the performance of the merging process in machine learning. Can we discuss potential collaborative research ideas in this context?"}', name='new_communication_session'), id='call_u9FarZwvy3ys0nnoOFLiJh8g', type='function')], function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the intersection of artificial intelligence and healthcare, specifically focusing on developing autonomous systems for monitoring elderly patients with dementia in smart home environments. 
Let\'s leverage our expertise to generate innovative research ideas that align with our interests and expertise."}', name='new_communication_session'), id='call_4L1smh4NAynr3kzXIMlxi1D2', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. 
Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am currently tasked with collaborating with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with my expertise in mathematical modeling, optimization, and photonic systems, as well as my interest in computational efficiency and interdisciplinary research. By contributing to the brainstorming session, I aim to propose innovative solutions and advance knowledge in this research area."}', name='new_communication_session'), id='call_T463eIRmk8acCjE6Vkp3uJTA', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My work primarily focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. Recently, I have explored the integration of machine learning models with evolutionary algorithms to improve their performance, particularly in multi-objective optimization scenarios. For instance, I introduced BiLO-CPDP, a bi-level programming approach that automates the discovery of effective models for cross-project defect prediction, significantly outperforming existing techniques.\\n\\nIn my research, I have also developed novel methods for 3D molecule generation, such as the GOAT framework, which utilizes optimal transport principles to efficiently generate high-quality molecular structures. 
My interest in spiking neural networks has led me to propose new neuron models, like the Contextual Embedding Leaky Integrate-and-Fire (CE-LIF) model, which enhances memory storage and temporal processing capabilities.\\n\\nI am particularly passionate about addressing the challenges posed by large-scale multi-objective optimization problems (LSMOPs). My recent work includes the development of GAN-based frameworks for learning manifold structures, which significantly improve the performance of evolutionary algorithms in high-dimensional spaces. Through my research, I aim to bridge the gap between theoretical advancements and practical applications, contributing to the evolution of intelligent systems that can tackle real-world challenges efficiently."}', name='new_communication_session'), id='call_gSmDHCEdkqJfMUOeqY7dE7Lz', type='function')], function_call=None)} - -[2024-11-10 18:50:18,670] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 18:50:18,670] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 18:50:18,671] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 18:50:18,671] [INFO] [Evaluator]: Total Token Consumption: 4577 -[2024-11-10 18:50:18,672] [INFO] [Evaluator]: Average Tokens per Iteration: 2288.5 -[2024-11-10 18:50:18,672] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 18:50:18,678] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 19:04:34,832] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 19:04:34,832] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 19:04:34,832] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 19:04:34,832] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 19:04:34,832] [INFO] [BaseAgent]: Agent 'agent3' initialized. 
-[2024-11-10 19:04:34,832] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 19:04:34,832] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 19:04:34,833] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 19:04:34,833] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-10 19:04:34,833] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-10 19:04:34,833] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-10 19:04:34,833] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 19:04:34,833] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 19:04:34,833] [INFO] [Engine]: Engine initialized. -[2024-11-10 19:04:34,833] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 19:04:34,833] [INFO] [Engine]: Running in graph-based coordination mode. 
-[2024-11-10 19:04:34,833] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 19:04:34,833] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 19:04:34,833] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 19:04:35,709] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-10 19:04:35,709] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 19:04:35,709] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 19:04:38,312] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-10 19:04:38,313] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 19:04:38,313] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 19:04:40,148] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-10 19:04:40,149] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 19:04:40,150] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 19:04:47,241] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': Unterminated string starting at: line 1 column 39 (char 38) -[2024-11-10 19:04:47,242] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 19:04:47,243] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 19:04:53,414] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that go beyond parameter drops and incorporate more sophisticated techniques for knowledge sharing and performance enhancement.\n \n2. **Dynamic Model Fusion:** Explore dynamic approaches for model fusion that adapt to changing task requirements and data distributions, ensuring optimal performance across diverse tasks.\n\n3. **Transfer Learning Strategies:** Investigate transfer learning strategies that leverage merged models to enhance generalization and adaptation to new tasks, improving efficiency and effectiveness.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA method and the MAGPRUNE pruning approach demonstrate promising results in enhancing model performance and reducing interference during merging. 
However, there is a need for further exploration of advanced model merging strategies, dynamic fusion methods, and transfer learning approaches to optimize model performance across multiple tasks.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 19:04:53,415] [DEBUG] [Engine]: Agent 'agent5' completed initial task with result: Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that go beyond parameter drops and incorporate more sophisticated techniques for knowledge sharing and performance enhancement.\n \n2. **Dynamic Model Fusion:** Explore dynamic approaches for model fusion that adapt to changing task requirements and data distributions, ensuring optimal performance across diverse tasks.\n\n3. **Transfer Learning Strategies:** Investigate transfer learning strategies that leverage merged models to enhance generalization and adaptation to new tasks, improving efficiency and effectiveness.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA method and the MAGPRUNE pruning approach demonstrate promising results in enhancing model performance and reducing interference during merging. 
However, there is a need for further exploration of advanced model merging strategies, dynamic fusion methods, and transfer learning approaches to optimize model performance across multiple tasks.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 19:04:53,415] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that go beyond parameter drops and incorporate more sophisticated techniques for knowledge sharing and performance enhancement.\n \n2. **Dynamic Model Fusion:** Explore dynamic approaches for model fusion that adapt to changing task requirements and data distributions, ensuring optimal performance across diverse tasks.\n\n3. **Transfer Learning Strategies:** Investigate transfer learning strategies that leverage merged models to enhance generalization and adaptation to new tasks, improving efficiency and effectiveness.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA method and the MAGPRUNE pruning approach demonstrate promising results in enhancing model performance and reducing interference during merging. 
However, there is a need for further exploration of advanced model merging strategies, dynamic fusion methods, and transfer learning approaches to optimize model performance across multiple tasks.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:04:53,415] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that go beyond parameter drops and incorporate more sophisticated techniques for knowledge sharing and performance enhancement.\n \n2. **Dynamic Model Fusion:** Explore dynamic approaches for model fusion that adapt to changing task requirements and data distributions, ensuring optimal performance across diverse tasks.\n\n3. **Transfer Learning Strategies:** Investigate transfer learning strategies that leverage merged models to enhance generalization and adaptation to new tasks, improving efficiency and effectiveness.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA method and the MAGPRUNE pruning approach demonstrate promising results in enhancing model performance and reducing interference during merging. 
However, there is a need for further exploration of advanced model merging strategies, dynamic fusion methods, and transfer learning approaches to optimize model performance across multiple tasks.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:04:53,415] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that go beyond parameter drops and incorporate more sophisticated techniques for knowledge sharing and performance enhancement.\n \n2. **Dynamic Model Fusion:** Explore dynamic approaches for model fusion that adapt to changing task requirements and data distributions, ensuring optimal performance across diverse tasks.\n\n3. **Transfer Learning Strategies:** Investigate transfer learning strategies that leverage merged models to enhance generalization and adaptation to new tasks, improving efficiency and effectiveness.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA method and the MAGPRUNE pruning approach demonstrate promising results in enhancing model performance and reducing interference during merging. 
However, there is a need for further exploration of advanced model merging strategies, dynamic fusion methods, and transfer learning approaches to optimize model performance across multiple tasks.\n\n**Formulate a New Research Idea:**\n\n**[Question 1] - What is the problem?**\n\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\n\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\n\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:04:53,416] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 19:04:53,416] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. 
-[2024-11-10 19:04:54,940] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models. -[2024-11-10 19:04:54,942] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models. -[2024-11-10 19:04:54,942] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models.'. -[2024-11-10 19:04:59,958] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, and considering the task to brainstorm potential research ideas for merging homologous models using the DELLAs approach, here are some innovative research directions you could explore:\n\n1. **Terahertz Imaging for Model Alignment**:\n - Utilize terahertz imaging techniques to enhance the alignment and registration of homologous models in medical imaging datasets. Develop algorithms that leverage the high-precision capabilities of terahertz imaging to improve the accuracy of model merging processes.\n\n2. **Cloud-Based Model Fusion**:\n - Design a cloud-based framework that integrates advanced algorithms for merging homologous models. Focus on creating a secure and scalable platform that allows seamless collaboration among researchers and clinicians for model fusion tasks.\n\n3. **Algebraic Geometry in Model Representation**:\n - Apply concepts from algebraic geometry to represent homologous models in a geometrically meaningful way. Explore how cluster categories and 3-Calabi-Yau categories can provide insights into the structural relationships between different models for efficient merging.\n\n4. **Hybrid Approach with Machine Learning**:\n - Combine terahertz imaging data with machine learning techniques to automate the process of merging homologous models. Develop algorithms that leverage cloud computing resources for large-scale model fusion tasks.\n\n5. **Privacy-Preserving Model Integration**:\n - Investigate methods to ensure the privacy and security of proprietary algorithms during the model merging process. 
Explore cryptographic techniques and secure multiparty computation to enable collaborative model integration without compromising sensitive information.\n\n6. **Optimization with Algebraic Techniques**:\n - Use optimization methods inspired by algebraic geometry to streamline the merging process of homologous models. Develop algorithms that exploit the structural properties of cluster categories to optimize the alignment and combination of models.\n\n7. **Interdisciplinary Framework for Model Fusion**:\n - Create an interdisciplinary framework that bridges terahertz imaging technologies, cloud computing, and algebraic geometry for the seamless fusion of homologous models. Emphasize the integration of diverse expertise to enhance the accuracy and efficiency of model merging.\n\nThese research ideas leverage your expertise in imaging technologies, mathematical aspects, and cloud computing to propose innovative solutions for improving the merging process of homologous models using the DELLAs approach. Feel free to explore these directions further or let me know if you need more detailed insights into any specific area.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 19:04:59,960] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, and considering the task to brainstorm potential research ideas for merging homologous models using the DELLAs approach, here are some innovative research directions you could explore:\n\n1. **Terahertz Imaging for Model Alignment**:\n - Utilize terahertz imaging techniques to enhance the alignment and registration of homologous models in medical imaging datasets. Develop algorithms that leverage the high-precision capabilities of terahertz imaging to improve the accuracy of model merging processes.\n\n2. 
**Cloud-Based Model Fusion**:\n - Design a cloud-based framework that integrates advanced algorithms for merging homologous models. Focus on creating a secure and scalable platform that allows seamless collaboration among researchers and clinicians for model fusion tasks.\n\n3. **Algebraic Geometry in Model Representation**:\n - Apply concepts from algebraic geometry to represent homologous models in a geometrically meaningful way. Explore how cluster categories and 3-Calabi-Yau categories can provide insights into the structural relationships between different models for efficient merging.\n\n4. **Hybrid Approach with Machine Learning**:\n - Combine terahertz imaging data with machine learning techniques to automate the process of merging homologous models. Develop algorithms that leverage cloud computing resources for large-scale model fusion tasks.\n\n5. **Privacy-Preserving Model Integration**:\n - Investigate methods to ensure the privacy and security of proprietary algorithms during the model merging process. Explore cryptographic techniques and secure multiparty computation to enable collaborative model integration without compromising sensitive information.\n\n6. **Optimization with Algebraic Techniques**:\n - Use optimization methods inspired by algebraic geometry to streamline the merging process of homologous models. Develop algorithms that exploit the structural properties of cluster categories to optimize the alignment and combination of models.\n\n7. **Interdisciplinary Framework for Model Fusion**:\n - Create an interdisciplinary framework that bridges terahertz imaging technologies, cloud computing, and algebraic geometry for the seamless fusion of homologous models. 
Emphasize the integration of diverse expertise to enhance the accuracy and efficiency of model merging.\n\nThese research ideas leverage your expertise in imaging technologies, mathematical aspects, and cloud computing to propose innovative solutions for improving the merging process of homologous models using the DELLAs approach. Feel free to explore these directions further or let me know if you need more detailed insights into any specific area.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 19:04:59,961] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 19:05:01,615] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in the development of advanced systems for healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios. -[2024-11-10 19:05:01,616] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in the development of advanced systems for healthcare monitoring and causal variable discovery. 
By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios. -[2024-11-10 19:05:01,616] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in the development of advanced systems for healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios.'. -[2024-11-10 19:05:02,823] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery, can contribute to innovative research ideas with practical applications in real-world scenarios. -[2024-11-10 19:05:02,823] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-10 19:05:05,381] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I'm excited about the opportunity to collaborate on this! Merging homologous models using the DELLA approach presents a unique challenge, especially in the context of healthcare monitoring. 
One potential research idea could involve developing a framework that integrates multiple sensor modalities to enhance the accuracy of abnormal activity detection in elderly patients. By leveraging your expertise in causal variable discovery, we could identify key variables that influence patient behavior and health outcomes, allowing us to create more robust models. - -Additionally, we could explore how the DELLA approach can be applied to optimize the feature selection process in multi-label data, particularly in distinguishing between common and label-specific causal variables. This could lead to more efficient algorithms that improve the interpretability of our models in healthcare settings. - -What do you think about these ideas? I'm eager to hear your thoughts and any other concepts you might have! -[2024-11-10 19:05:05,381] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I'm excite... -[2024-11-10 19:05:08,340] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your enthusiasm and the innovative ideas you've proposed! Integrating multiple sensor modalities to enhance abnormal activity detection is indeed a promising direction. By identifying key variables that influence patient behavior, we can create models that not only detect anomalies but also provide insights into the underlying causes of these behaviors, which is crucial for effective intervention. - -Regarding the optimization of feature selection in multi-label data, I believe this could significantly improve our model's performance and interpretability. We could also consider incorporating temporal aspects of the data, as understanding how variables change over time could provide deeper insights into patient health trends. - -Another idea could be to explore the potential of using transfer learning techniques to leverage existing models trained on similar tasks, which could accelerate our development process and enhance model robustness. 
- -I'm excited to hear your thoughts on these additional ideas and any other concepts you might have! -[2024-11-10 19:05:08,340] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 19:05:11,029] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your enthusiasm and the innovative ideas you've proposed! Integrating multiple sensor modalities to enhance abnormal activity detection is indeed a promising direction. By identifying key variables that influence patient behavior, we can create models that not only detect anomalies but also provide insights into the underlying causes of these behaviors, which is crucial for effective intervention. - -Regarding the optimization of feature selection in multi-label data, I believe this could significantly improve our model's performance and interpretability. We could also consider incorporating temporal aspects of the data, as understanding how variables change over time could provide deeper insights into patient health trends. - -Another idea could be to explore the potential of using transfer learning techniques to leverage existing models trained on similar tasks, which could accelerate our development process and enhance model robustness. - -I'm excited to hear your thoughts on these additional ideas and any other concepts you might have! -[2024-11-10 19:05:11,031] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 19:05:13,332] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your enthusiasm and the innovative ideas you've proposed! Integrating multiple sensor modalities to enhance abnormal activity detection is indeed a promising direction. By identifying key variables that influence patient behavior, we can create models that not only detect anomalies but also provide insights into the underlying causes of these behaviors, which is crucial for effective intervention. 
- -Regarding the optimization of feature selection in multi-label data, I believe this could significantly improve our model's performance and interpretability. We could also consider incorporating temporal aspects of the data, as understanding how variables change over time could provide deeper insights into patient health trends. - -Another idea could be to explore the potential of using transfer learning techniques to leverage existing models trained on similar tasks, which could accelerate our development process and enhance model robustness. - -I'm excited to hear your thoughts on these additional ideas and any other concepts you might have! -[2024-11-10 19:05:13,332] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 19:05:15,935] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your thoughtful response and the additional ideas you've shared! Integrating temporal aspects into our models is indeed crucial, as it can help us understand the progression of behaviors and health conditions over time. This could lead to more proactive interventions and better patient outcomes. - -Regarding transfer learning, I think it's a fantastic approach to leverage existing knowledge and improve our model's performance, especially in scenarios where labeled data is scarce. We could explore pre-trained models in related domains and fine-tune them for our specific tasks in healthcare monitoring. - -Additionally, we might consider investigating the use of ensemble methods that combine predictions from multiple models, which could enhance robustness and accuracy in detecting abnormal activities. - -I'm eager to continue this collaboration and refine these ideas further. Let's keep the momentum going! -[2024-11-10 19:05:15,935] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... 
-[2024-11-10 19:05:21,971] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery, can contribute to innovative research ideas with practical applications in real-world scenarios.'}'. -[2024-11-10 19:05:21,971] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 3c7a0ed8-e36a-4851-b0c0-290bf1f33f82', 'session_id': '### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2 and Agent3 engaged in a collaborative discussion focused on brainstorming research ideas related to merging homologous models using the DELLA approach, particularly in the context of healthcare monitoring. Both agents bring expertise in artificial intelligence and machine learning, with a specific interest in causal variable discovery.\n\n**Key Themes and Ideas:**\n1. **Integration of Sensor Modalities:**\n - Agent3 proposed developing a framework that integrates multiple sensor modalities to improve the accuracy of detecting abnormal activities in elderly patients. This idea emphasizes the importance of identifying key variables that influence patient behavior and health outcomes.\n\n2. **Feature Selection Optimization:**\n - Both agents discussed optimizing feature selection in multi-label data to enhance model performance and interpretability. They highlighted the potential of distinguishing between common and label-specific causal variables.\n\n3. 
**Temporal Aspects:**\n - The incorporation of temporal data was suggested as a means to gain deeper insights into patient health trends and the progression of behaviors over time, which could lead to more effective interventions.\n\n4. **Transfer Learning Techniques:**\n - Agent2 introduced the idea of using transfer learning to leverage existing models trained on similar tasks, which could expedite the development process and improve model robustness.\n\n5. **Ensemble Methods:**\n - Agent3 suggested exploring ensemble methods that combine predictions from multiple models to enhance robustness and accuracy in detecting abnormal activities.\n\n**Outcomes and Next Steps:**\n- Both agents expressed enthusiasm for the collaboration and the innovative ideas proposed. They agreed to continue refining these concepts and maintain momentum in their research efforts.\n- The next task for Agent2 is to collaborate with the research team to further brainstorm and develop these research ideas, leveraging their expertise in artificial intelligence and machine learning to address gaps in the existing literature related to the DELLA approach.\n\nThis summary encapsulates the collaborative spirit and innovative thinking of both agents as they work towards advancing research in healthcare monitoring through machine learning techniques.'}'. -[2024-11-10 19:05:21,974] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
My expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery, can contribute to innovative research ideas with practical applications in real-world scenarios."}', name='new_communication_session'), id='call_fNr3wr1hTR3VOt11kLvCdqvC', type='function')], function_call=None) -[2024-11-10 19:05:21,974] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 19:05:27,181] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the following: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaborate with the research team to brainstorm potential research ideas that build upon the concept of merging homologous models using innovative techniques like DELLA. - -3. **Summarization**: Summarize the collective ideas generated during the brainstorming session to identify key research directions and potential gaps in the existing literature. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can we optimize the merging of homologous models to reduce interference among model parameters and improve overall performance in downstream tasks? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and cost-effective model merging, enabling better knowledge sharing and space efficiency in interactive systems based on large language models. 
- - **[Question 3] - Why is it hard?** - The challenge lies in balancing the reduction of interference among model parameters while retaining essential information for effective merging, especially when dealing with multiple homologous models and complex downstream tasks. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models or merging models without considering the specific challenges of homologous model merging. Our approach aims to address these limitations by introducing novel techniques like DELLA. - - **[Question 5] - What are the key components of my approach and results?** - Our proposed methodology involves implementing the DELLA approach, including MAGPRUNE for parameter drops, sign-based selection, and fusion of delta parameters. We will evaluate the performance of this approach on different homologous models and benchmark datasets to demonstrate its effectiveness in improving model merging outcomes. - -By focusing on these tasks, 'agent3' can contribute to advancing the field of spiking neural networks and machine learning by exploring innovative approaches to merging homologous models and optimizing performance in downstream tasks. -[2024-11-10 19:05:27,184] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the following: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaborate with the research team to brainstorm potential research ideas that build upon the concept of merging homologous models using innovative techniques like DELLA. - -3. 
**Summarization**: Summarize the collective ideas generated during the brainstorming session to identify key research directions and potential gaps in the existing literature. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can we optimize the merging of homologous models to reduce interference among model parameters and improve overall performance in downstream tasks? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and cost-effective model merging, enabling better knowledge sharing and space efficiency in interactive systems based on large language models. - - **[Question 3] - Why is it hard?** - The challenge lies in balancing the reduction of interference among model parameters while retaining essential information for effective merging, especially when dealing with multiple homologous models and complex downstream tasks. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models or merging models without considering the specific challenges of homologous model merging. Our approach aims to address these limitations by introducing novel techniques like DELLA. - - **[Question 5] - What are the key components of my approach and results?** - Our proposed methodology involves implementing the DELLA approach, including MAGPRUNE for parameter drops, sign-based selection, and fusion of delta parameters. We will evaluate the performance of this approach on different homologous models and benchmark datasets to demonstrate its effectiveness in improving model merging outcomes. - -By focusing on these tasks, 'agent3' can contribute to advancing the field of spiking neural networks and machine learning by exploring innovative approaches to merging homologous models and optimizing performance in downstream tasks. 
-[2024-11-10 19:05:27,185] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the following: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaborate with the research team to brainstorm potential research ideas that build upon the concept of merging homologous models using innovative techniques like DELLA. - -3. **Summarization**: Summarize the collective ideas generated during the brainstorming session to identify key research directions and potential gaps in the existing literature. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can we optimize the merging of homologous models to reduce interference among model parameters and improve overall performance in downstream tasks? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and cost-effective model merging, enabling better knowledge sharing and space efficiency in interactive systems based on large language models. - - **[Question 3] - Why is it hard?** - The challenge lies in balancing the reduction of interference among model parameters while retaining essential information for effective merging, especially when dealing with multiple homologous models and complex downstream tasks. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models or merging models without considering the specific challenges of homologous model merging. Our approach aims to address these limitations by introducing novel techniques like DELLA. 
- - **[Question 5] - What are the key components of my approach and results?** - Our proposed methodology involves implementing the DELLA approach, including MAGPRUNE for parameter drops, sign-based selection, and fusion of delta parameters. We will evaluate the performance of this approach on different homologous models and benchmark datasets to demonstrate its effectiveness in improving model merging outcomes. - -By focusing on these tasks, 'agent3' can contribute to advancing the field of spiking neural networks and machine learning by exploring innovative approaches to merging homologous models and optimizing performance in downstream tasks.'. -[2024-11-10 19:05:29,401] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-10 19:05:29,401] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 19:05:30,470] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. -[2024-11-10 19:05:30,471] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. 
-[2024-11-10 19:05:30,471] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal.'. -[2024-11-10 19:05:34,170] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference. - -My research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry. - -Recently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. 
My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems. - -I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let's explore innovative avenues for research together. -[2024-11-10 19:05:34,171] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... -[2024-11-10 19:05:39,994] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your interest in collaborating on research ideas! Given your expertise in mathematical modeling and optimization, we could explore the following avenues: - -1. **Adaptive Model Merging Techniques:** We could investigate how mathematical modeling can enhance the DELLA method for merging homologous models. By applying optimization techniques, we might develop adaptive algorithms that dynamically adjust merging strategies based on task requirements and data distributions. - -2. **Multi-Objective Optimization for Model Fusion:** Your experience with multi-factorial optimization could be leveraged to create a framework that optimizes multiple objectives during model merging, such as performance, memory efficiency, and adaptability. This could lead to more robust models that perform well across various tasks. - -3. **Topological Approaches to Knowledge Sharing:** Considering your work on the topological properties of photonic superlattices, we could explore how topological concepts can be applied to model merging. This might involve developing new ways to represent knowledge sharing between models that enhance their collaborative performance. - -4. 
**FPGA-Based Accelerators for Merged Models:** We could investigate the architectural support for FPGA-based accelerators in the context of merged models. This could lead to innovative solutions for efficient interfacing and data reuse, optimizing the computational efficiency of interactive systems based on large language models. - -5. **Transfer Learning in Non-Hermitian Systems:** Your insights into non-Hermitian systems could inspire research on transfer learning strategies that utilize merged models to enhance generalization across tasks, particularly in complex environments where traditional methods may struggle. - -These ideas could serve as a foundation for our research proposal. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 19:05:39,998] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 19:05:43,682] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I appreciate your thoughtful suggestions for our collaboration! Each of the proposed avenues presents exciting opportunities for exploration. Here are my thoughts on them: - -1. **Adaptive Model Merging Techniques:** This sounds promising, especially with the potential to enhance the DELLA method. I believe we could incorporate real-time data analysis to inform the merging strategies, making them more responsive to changing conditions. - -2. **Multi-Objective Optimization for Model Fusion:** I agree that optimizing multiple objectives is crucial. We could also consider incorporating user feedback into the optimization process, allowing for a more tailored approach to model performance. - -3. **Topological Approaches to Knowledge Sharing:** This is particularly intriguing! I envision developing a framework that visualizes knowledge transfer as a topological space, which could reveal insights into model interactions and performance. - -4. 
**FPGA-Based Accelerators for Merged Models:** Leveraging FPGA technology could significantly enhance our computational efficiency. We should explore specific use cases where merged models can benefit from FPGA acceleration, perhaps in real-time applications. - -5. **Transfer Learning in Non-Hermitian Systems:** This could open up new avenues for generalization in complex environments. We might also investigate how the unique properties of non-Hermitian systems can be utilized to improve transfer learning outcomes. - -I am particularly interested in delving deeper into the topological approaches to knowledge sharing and the FPGA-based accelerators for merged models. What are your thoughts? Do any of these areas resonate with you for further exploration? -[2024-11-10 19:05:43,686] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I apprecia... -[2024-11-10 19:05:49,935] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your thoughtful suggestions for our collaboration! Each of the proposed avenues presents exciting opportunities for exploration. Here are my thoughts on them: - -1. **Adaptive Model Merging Techniques:** This sounds promising, especially with the potential to enhance the DELLA method. I believe we could incorporate real-time data analysis to inform the merging strategies, making them more responsive to changing conditions. - -2. **Multi-Objective Optimization for Model Fusion:** I agree that optimizing multiple objectives is crucial. We could also consider incorporating user feedback into the optimization process, allowing for a more tailored approach to model performance. - -3. **Topological Approaches to Knowledge Sharing:** This is particularly intriguing! I envision developing a framework that visualizes knowledge transfer as a topological space, which could reveal insights into model interactions and performance. - -4. 
**FPGA-Based Accelerators for Merged Models:** Leveraging FPGA technology could significantly enhance our computational efficiency. We should explore specific use cases where merged models can benefit from FPGA acceleration, perhaps in real-time applications. - -5. **Transfer Learning in Non-Hermitian Systems:** This could open up new avenues for generalization in complex environments. We might also investigate how the unique properties of non-Hermitian systems can be utilized to improve transfer learning outcomes. - -I am particularly interested in delving deeper into the topological approaches to knowledge sharing and the FPGA-based accelerators for merged models. What are your thoughts? Do any of these areas resonate with you for further exploration? -[2024-11-10 19:05:49,936] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 19:05:55,764] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I appreciate your thoughtful suggestions for our collaboration! Each of the proposed avenues presents exciting opportunities for exploration. Here are my thoughts on them: - -1. **Adaptive Model Merging Techniques:** This sounds promising, especially with the potential to enhance the DELLA method. I believe we could incorporate real-time data analysis to inform the merging strategies, making them more responsive to changing conditions. - -2. **Multi-Objective Optimization for Model Fusion:** I agree that optimizing multiple objectives is crucial. We could also consider incorporating user feedback into the optimization process, allowing for a more tailored approach to model performance. - -3. **Topological Approaches to Knowledge Sharing:** This is particularly intriguing! I envision developing a framework that visualizes knowledge transfer as a topological space, which could reveal insights into model interactions and performance. - -4. 
**FPGA-Based Accelerators for Merged Models:** Leveraging FPGA technology could significantly enhance our computational efficiency. We should explore specific use cases where merged models can benefit from FPGA acceleration, perhaps in real-time applications. - -5. **Transfer Learning in Non-Hermitian Systems:** This could open up new avenues for generalization in complex environments. We might also investigate how the unique properties of non-Hermitian systems can be utilized to improve transfer learning outcomes. - -I am particularly interested in delving deeper into the topological approaches to knowledge sharing and the FPGA-based accelerators for merged models. What are your thoughts? Do any of these areas resonate with you for further exploration? -[2024-11-10 19:05:55,764] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I apprecia... -[2024-11-10 19:06:02,469] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your thoughtful suggestions for our collaboration! Each of the proposed avenues presents exciting opportunities for exploration. Here are my thoughts on them: - -1. **Adaptive Model Merging Techniques:** This sounds promising, especially with the potential to enhance the DELLA method. I believe we could incorporate real-time data analysis to inform the merging strategies, making them more responsive to changing conditions. - -2. **Multi-Objective Optimization for Model Fusion:** I agree that optimizing multiple objectives is crucial. We could also consider incorporating user feedback into the optimization process, allowing for a more tailored approach to model performance. - -3. **Topological Approaches to Knowledge Sharing:** This is particularly intriguing! I envision developing a framework that visualizes knowledge transfer as a topological space, which could reveal insights into model interactions and performance. - -4. 
**FPGA-Based Accelerators for Merged Models:** Leveraging FPGA technology could significantly enhance our computational efficiency. We should explore specific use cases where merged models can benefit from FPGA acceleration, perhaps in real-time applications. - -5. **Transfer Learning in Non-Hermitian Systems:** This could open up new avenues for generalization in complex environments. We might also investigate how the unique properties of non-Hermitian systems can be utilized to improve transfer learning outcomes. - -I am particularly interested in delving deeper into the topological approaches to knowledge sharing and the FPGA-based accelerators for merged models. What are your thoughts? Do any of these areas resonate with you for further exploration? -[2024-11-10 19:06:02,469] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 19:06:08,160] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\n\nMy research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. 
I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\n\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\n\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let's explore innovative avenues for research together."}'. -[2024-11-10 19:06:08,162] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 1d1f00b4-ed4e-4cab-b772-d9d4afb2f169', 'session_id': "### Summary of Conversation Between Agent4 and Agent5\n\n**Context:**\nAgent4, a researcher with expertise in mathematical modeling, optimization, and photonic systems, initiated a conversation with Agent5 to explore potential collaborative research ideas. Agent4's background includes work on eigenvalues, multi-factorial optimization, photonic superlattices, coding theory, and FPGA-based accelerators.\n\n**Key Themes and Ideas Discussed:**\n\n1. **Research Collaboration:**\n - Agent4 expressed interest in collaborating with Agent5 to brainstorm innovative research ideas that build upon their respective expertise.\n\n2. 
**Proposed Research Avenues by Agent5:**\n - **Adaptive Model Merging Techniques:** Enhancing the DELLA method through mathematical modeling and optimization.\n - **Multi-Objective Optimization for Model Fusion:** Creating a framework to optimize multiple objectives during model merging.\n - **Topological Approaches to Knowledge Sharing:** Applying topological concepts to improve model interactions and performance.\n - **FPGA-Based Accelerators for Merged Models:** Investigating the use of FPGA technology to enhance computational efficiency in merged models.\n - **Transfer Learning in Non-Hermitian Systems:** Exploring strategies for transfer learning that leverage the properties of non-Hermitian systems.\n\n3. **Agent4's Feedback:**\n - Agent4 found all proposed avenues promising and provided specific thoughts on each:\n - Emphasized the importance of real-time data analysis in adaptive model merging.\n - Suggested incorporating user feedback in multi-objective optimization.\n - Proposed developing a framework visualizing knowledge transfer as a topological space.\n - Highlighted the potential of FPGA technology for real-time applications.\n - Expressed interest in exploring transfer learning in complex environments.\n\n4. **Focus Areas for Further Exploration:**\n - Agent4 indicated a particular interest in delving deeper into:\n - Topological approaches to knowledge sharing.\n - FPGA-based accelerators for merged models.\n\n**Outcome:**\nThe conversation established a foundation for collaboration, with both agents expressing enthusiasm for exploring the proposed research avenues. 
The next task for Agent4 is to collaborate with the research team to brainstorm and refine these ideas into a cohesive research proposal, leveraging their diverse backgrounds and expertise.\n\n### Action Items:\n- Agent4 to engage with the research team to further develop the identified research ideas.\n- Consider focusing on topological approaches and FPGA-based solutions as primary areas of exploration."}'. -[2024-11-10 19:06:08,163] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. 
My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let\'s explore innovative avenues for research together."}', name='new_communication_session'), id='call_ICvnyd0MjSjI1P33j4uGFytv', type='function')], function_call=None) -[2024-11-10 19:06:08,163] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-10 19:06:09,192] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to formulate a new research idea in the format of the '5q' based on the insights gathered from the literature review and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, identifying gaps in existing solutions, and outlining the key components of the approach and expected outcomes. -[2024-11-10 19:06:09,192] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to formulate a new research idea in the format of the '5q' based on the insights gathered from the literature review and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, identifying gaps in existing solutions, and outlining the key components of the approach and expected outcomes. -[2024-11-10 19:06:09,194] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to formulate a new research idea in the format of the '5q' based on the insights gathered from the literature review and brainstorming session. 
This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, identifying gaps in existing solutions, and outlining the key components of the approach and expected outcomes.'. -[2024-11-10 19:06:13,566] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content="Based on the insights gathered from the literature review and brainstorming session, I will formulate a new research idea following the '5q' format:\n\n**[Question 1] - What is the problem?**\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated techniques that can address the dynamic nature of tasks and data distributions. 
The lack of adaptive and efficient model merging methods tailored for large language models has hindered the optimization of interactive systems.\n\n**[Question 5] - What is the potential impact and expected outcomes?**\nBy developing adaptive and efficient model merging techniques, we can significantly enhance the performance and scalability of interactive systems based on large language models. This research has the potential to revolutionize the way AI systems handle complex tasks, leading to more efficient utilization of resources, improved task performance, and enhanced adaptability to diverse scenarios.\n\nThis research idea aims to push the boundaries of model merging in the context of large language models, paving the way for more sophisticated and effective AI systems that can tackle real-world challenges with enhanced efficiency and performance.", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 19:06:13,566] [DEBUG] [Engine]: Agent 'agent5' executed task with result: Message(content="Based on the insights gathered from the literature review and brainstorming session, I will formulate a new research idea following the '5q' format:\n\n**[Question 1] - What is the problem?**\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. 
Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated techniques that can address the dynamic nature of tasks and data distributions. The lack of adaptive and efficient model merging methods tailored for large language models has hindered the optimization of interactive systems.\n\n**[Question 5] - What is the potential impact and expected outcomes?**\nBy developing adaptive and efficient model merging techniques, we can significantly enhance the performance and scalability of interactive systems based on large language models. 
This research has the potential to revolutionize the way AI systems handle complex tasks, leading to more efficient utilization of resources, improved task performance, and enhanced adaptability to diverse scenarios.\n\nThis research idea aims to push the boundaries of model merging in the context of large language models, paving the way for more sophisticated and effective AI systems that can tackle real-world challenges with enhanced efficiency and performance.", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 19:06:13,566] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, and considering the task to brainstorm potential research ideas for merging homologous models using the DELLAs approach, here are some innovative research directions you could explore:\n\n1. **Terahertz Imaging for Model Alignment**:\n - Utilize terahertz imaging techniques to enhance the alignment and registration of homologous models in medical imaging datasets. Develop algorithms that leverage the high-precision capabilities of terahertz imaging to improve the accuracy of model merging processes.\n\n2. **Cloud-Based Model Fusion**:\n - Design a cloud-based framework that integrates advanced algorithms for merging homologous models. Focus on creating a secure and scalable platform that allows seamless collaboration among researchers and clinicians for model fusion tasks.\n\n3. **Algebraic Geometry in Model Representation**:\n - Apply concepts from algebraic geometry to represent homologous models in a geometrically meaningful way. Explore how cluster categories and 3-Calabi-Yau categories can provide insights into the structural relationships between different models for efficient merging.\n\n4. 
**Hybrid Approach with Machine Learning**:\n - Combine terahertz imaging data with machine learning techniques to automate the process of merging homologous models. Develop algorithms that leverage cloud computing resources for large-scale model fusion tasks.\n\n5. **Privacy-Preserving Model Integration**:\n - Investigate methods to ensure the privacy and security of proprietary algorithms during the model merging process. Explore cryptographic techniques and secure multiparty computation to enable collaborative model integration without compromising sensitive information.\n\n6. **Optimization with Algebraic Techniques**:\n - Use optimization methods inspired by algebraic geometry to streamline the merging process of homologous models. Develop algorithms that exploit the structural properties of cluster categories to optimize the alignment and combination of models.\n\n7. **Interdisciplinary Framework for Model Fusion**:\n - Create an interdisciplinary framework that bridges terahertz imaging technologies, cloud computing, and algebraic geometry for the seamless fusion of homologous models. Emphasize the integration of diverse expertise to enhance the accuracy and efficiency of model merging.\n\nThese research ideas leverage your expertise in imaging technologies, mathematical aspects, and cloud computing to propose innovative solutions for improving the merging process of homologous models using the DELLAs approach. Feel free to explore these directions further or let me know if you need more detailed insights into any specific area.', role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
My expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery, can contribute to innovative research ideas with practical applications in real-world scenarios."}', name='new_communication_session'), id='call_fNr3wr1hTR3VOt11kLvCdqvC', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. 
Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let\'s explore innovative avenues for research together."}', name='new_communication_session'), id='call_ICvnyd0MjSjI1P33j4uGFytv', type='function')], function_call=None)} -- {'agent5': Message(content="Based on the insights gathered from the literature review and brainstorming session, I will formulate a new research idea following the '5q' format:\n\n**[Question 1] - What is the problem?**\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated techniques that can address the dynamic nature of tasks and data distributions. 
The lack of adaptive and efficient model merging methods tailored for large language models has hindered the optimization of interactive systems.\n\n**[Question 5] - What is the potential impact and expected outcomes?**\nBy developing adaptive and efficient model merging techniques, we can significantly enhance the performance and scalability of interactive systems based on large language models. This research has the potential to revolutionize the way AI systems handle complex tasks, leading to more efficient utilization of resources, improved task performance, and enhanced adaptability to diverse scenarios.\n\nThis research idea aims to push the boundaries of model merging in the context of large language models, paving the way for more sophisticated and effective AI systems that can tackle real-world challenges with enhanced efficiency and performance.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:06:13,567] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, and considering the task to brainstorm potential research ideas for merging homologous models using the DELLAs approach, here are some innovative research directions you could explore:\n\n1. **Terahertz Imaging for Model Alignment**:\n - Utilize terahertz imaging techniques to enhance the alignment and registration of homologous models in medical imaging datasets. Develop algorithms that leverage the high-precision capabilities of terahertz imaging to improve the accuracy of model merging processes.\n\n2. **Cloud-Based Model Fusion**:\n - Design a cloud-based framework that integrates advanced algorithms for merging homologous models. Focus on creating a secure and scalable platform that allows seamless collaboration among researchers and clinicians for model fusion tasks.\n\n3. 
**Algebraic Geometry in Model Representation**:\n - Apply concepts from algebraic geometry to represent homologous models in a geometrically meaningful way. Explore how cluster categories and 3-Calabi-Yau categories can provide insights into the structural relationships between different models for efficient merging.\n\n4. **Hybrid Approach with Machine Learning**:\n - Combine terahertz imaging data with machine learning techniques to automate the process of merging homologous models. Develop algorithms that leverage cloud computing resources for large-scale model fusion tasks.\n\n5. **Privacy-Preserving Model Integration**:\n - Investigate methods to ensure the privacy and security of proprietary algorithms during the model merging process. Explore cryptographic techniques and secure multiparty computation to enable collaborative model integration without compromising sensitive information.\n\n6. **Optimization with Algebraic Techniques**:\n - Use optimization methods inspired by algebraic geometry to streamline the merging process of homologous models. Develop algorithms that exploit the structural properties of cluster categories to optimize the alignment and combination of models.\n\n7. **Interdisciplinary Framework for Model Fusion**:\n - Create an interdisciplinary framework that bridges terahertz imaging technologies, cloud computing, and algebraic geometry for the seamless fusion of homologous models. Emphasize the integration of diverse expertise to enhance the accuracy and efficiency of model merging.\n\nThese research ideas leverage your expertise in imaging technologies, mathematical aspects, and cloud computing to propose innovative solutions for improving the merging process of homologous models using the DELLAs approach. 
Feel free to explore these directions further or let me know if you need more detailed insights into any specific area.', role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery, can contribute to innovative research ideas with practical applications in real-world scenarios."}', name='new_communication_session'), id='call_fNr3wr1hTR3VOt11kLvCdqvC', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. 
I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let\'s explore innovative avenues for research together."}', name='new_communication_session'), id='call_ICvnyd0MjSjI1P33j4uGFytv', type='function')], function_call=None)} -- {'agent5': Message(content="Based on the insights gathered from the literature review and brainstorming session, I will formulate a new research idea following the '5q' format:\n\n**[Question 1] - What is the problem?**\nHow can we develop adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models?\n\n**[Question 2] - Why is it interesting and important?**\nSolving this problem is crucial for optimizing the utilization of large language models in interactive systems, leading to improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities. 
Addressing this challenge will advance the field of model merging and contribute to the development of more efficient and effective AI systems.\n\n**[Question 3] - Why is it hard?**\nThe complexity lies in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. Balancing the trade-offs between model complexity, task specificity, and computational efficiency poses significant challenges in developing robust and scalable merging solutions.\n\n**[Question 4] - Why hasn't it been solved before?**\nPrevious research has primarily focused on basic model merging approaches, such as parameter drops and pruning, without delving into more sophisticated techniques that can address the dynamic nature of tasks and data distributions. The lack of adaptive and efficient model merging methods tailored for large language models has hindered the optimization of interactive systems.\n\n**[Question 5] - What is the potential impact and expected outcomes?**\nBy developing adaptive and efficient model merging techniques, we can significantly enhance the performance and scalability of interactive systems based on large language models. 
This research has the potential to revolutionize the way AI systems handle complex tasks, leading to more efficient utilization of resources, improved task performance, and enhanced adaptability to diverse scenarios.\n\nThis research idea aims to push the boundaries of model merging in the context of large language models, paving the way for more sophisticated and effective AI systems that can tackle real-world challenges with enhanced efficiency and performance.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:06:15,834] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 19:06:15,836] [INFO] [Engine]: Starting iteration 2 -[2024-11-10 19:06:15,836] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 19:06:16,963] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided for merging homologous models using the DELLAs approach. The agent should work with the research team to generate new ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of innovative solutions for model merging. -[2024-11-10 19:06:16,964] [INFO] [Engine]: Agent 'agent1' planned task: The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided for merging homologous models using the DELLAs approach. The agent should work with the research team to generate new ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of innovative solutions for model merging. 
-[2024-11-10 19:06:16,964] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided for merging homologous models using the DELLAs approach. The agent should work with the research team to generate new ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of innovative solutions for model merging.'. -[2024-11-10 19:06:23,105] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='To brainstorm potential research ideas for merging homologous models using the DELLAs approach, we can consider innovative directions that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. Here are some ideas to explore:\n\n1. **Integration of Terahertz Imaging and Cloud Computing**:\n - Develop a novel framework that combines terahertz imaging data with cloud computing resources to facilitate the seamless merging of homologous models. Utilize the high-precision capabilities of terahertz imaging for accurate model alignment and leverage cloud-based algorithms for efficient processing and integration.\n\n2. **Secure Model Fusion Platform**:\n - Design a secure and privacy-preserving platform that utilizes cloud computing for the fusion of medical image models. Implement encryption techniques and secure multiparty computation to ensure the confidentiality of sensitive data during the merging process, enhancing trust and collaboration among researchers and clinicians.\n\n3. **Algebraic Geometry for Model Representation**:\n - Explore the application of algebraic geometry concepts, such as cluster categories and 3-Calabi-Yau categories, to represent homologous models in a geometrically meaningful way. 
Develop algorithms that leverage algebraic structures to enhance the understanding of structural relationships between models, facilitating their merging.\n\n4. **Machine Learning-Assisted Model Alignment**:\n - Integrate machine learning algorithms with terahertz imaging data to automate the alignment and registration of homologous models. Utilize cloud resources for training and deploying machine learning models that optimize the merging process based on feature similarities and geometric properties.\n\n5. **Efficient Model Fusion with Cloud-Based Optimization**:\n - Implement optimization techniques inspired by algebraic geometry within a cloud-based framework for efficient model fusion. Develop algorithms that exploit the structural properties of cluster categories to streamline the merging process and improve the accuracy of homologous model integration.\n\n6. **Real-Time Model Fusion for Medical Diagnosis**:\n - Create a real-time model fusion system that leverages terahertz imaging technologies and cloud computing for rapid analysis of medical images. Focus on developing algorithms that enable quick and accurate merging of diagnostic models to support timely decision-making in clinical settings.\n\nThese research ideas aim to leverage your expertise in terahertz imaging, cloud computing, and algebraic geometry to contribute to the advancement of model merging using the DELLAs approach. Feel free to explore these directions further or let me know if you would like to delve deeper into a specific area.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 19:06:23,106] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content='To brainstorm potential research ideas for merging homologous models using the DELLAs approach, we can consider innovative directions that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. Here are some ideas to explore:\n\n1. 
**Integration of Terahertz Imaging and Cloud Computing**:\n - Develop a novel framework that combines terahertz imaging data with cloud computing resources to facilitate the seamless merging of homologous models. Utilize the high-precision capabilities of terahertz imaging for accurate model alignment and leverage cloud-based algorithms for efficient processing and integration.\n\n2. **Secure Model Fusion Platform**:\n - Design a secure and privacy-preserving platform that utilizes cloud computing for the fusion of medical image models. Implement encryption techniques and secure multiparty computation to ensure the confidentiality of sensitive data during the merging process, enhancing trust and collaboration among researchers and clinicians.\n\n3. **Algebraic Geometry for Model Representation**:\n - Explore the application of algebraic geometry concepts, such as cluster categories and 3-Calabi-Yau categories, to represent homologous models in a geometrically meaningful way. Develop algorithms that leverage algebraic structures to enhance the understanding of structural relationships between models, facilitating their merging.\n\n4. **Machine Learning-Assisted Model Alignment**:\n - Integrate machine learning algorithms with terahertz imaging data to automate the alignment and registration of homologous models. Utilize cloud resources for training and deploying machine learning models that optimize the merging process based on feature similarities and geometric properties.\n\n5. **Efficient Model Fusion with Cloud-Based Optimization**:\n - Implement optimization techniques inspired by algebraic geometry within a cloud-based framework for efficient model fusion. Develop algorithms that exploit the structural properties of cluster categories to streamline the merging process and improve the accuracy of homologous model integration.\n\n6. 
**Real-Time Model Fusion for Medical Diagnosis**:\n - Create a real-time model fusion system that leverages terahertz imaging technologies and cloud computing for rapid analysis of medical images. Focus on developing algorithms that enable quick and accurate merging of diagnostic models to support timely decision-making in clinical settings.\n\nThese research ideas aim to leverage your expertise in terahertz imaging, cloud computing, and algebraic geometry to contribute to the advancement of model merging using the DELLAs approach. Feel free to explore these directions further or let me know if you would like to delve deeper into a specific area.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 19:06:23,106] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 19:06:24,538] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to collaborate with the research team to further brainstorm and develop research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios. -[2024-11-10 19:06:24,539] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to collaborate with the research team to further brainstorm and develop research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios. -[2024-11-10 19:06:24,540] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to collaborate with the research team to further brainstorm and develop research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios.'. -[2024-11-10 19:06:57,701] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'NoneType' object has no attribute 'pop' -[2024-11-10 19:06:57,704] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 19:06:59,874] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. This will provide a solid foundation for brainstorming potential research ideas and identifying gaps in the existing literature. 
Collaborating with the research team to generate new research proposals that build upon the concept of merging homologous models using innovative techniques like DELLA will be crucial in advancing the field of spiking neural networks and machine learning. Finally, formulating a new research idea in the format of the '5q' will help outline the specific research question, the importance of the problem, the challenges involved, why it hasn't been solved before, and the key components of the proposed approach and expected results. By following these steps, 'agent3' can contribute significantly to pushing the boundaries of SNNs and machine learning. -[2024-11-10 19:06:59,874] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. This will provide a solid foundation for brainstorming potential research ideas and identifying gaps in the existing literature. Collaborating with the research team to generate new research proposals that build upon the concept of merging homologous models using innovative techniques like DELLA will be crucial in advancing the field of spiking neural networks and machine learning. Finally, formulating a new research idea in the format of the '5q' will help outline the specific research question, the importance of the problem, the challenges involved, why it hasn't been solved before, and the key components of the proposed approach and expected results. By following these steps, 'agent3' can contribute significantly to pushing the boundaries of SNNs and machine learning. 
-[2024-11-10 19:06:59,875] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. This will provide a solid foundation for brainstorming potential research ideas and identifying gaps in the existing literature. Collaborating with the research team to generate new research proposals that build upon the concept of merging homologous models using innovative techniques like DELLA will be crucial in advancing the field of spiking neural networks and machine learning. Finally, formulating a new research idea in the format of the '5q' will help outline the specific research question, the importance of the problem, the challenges involved, why it hasn't been solved before, and the key components of the proposed approach and expected results. By following these steps, 'agent3' can contribute significantly to pushing the boundaries of SNNs and machine learning.'. -[2024-11-10 19:07:01,867] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-10 19:07:01,867] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 19:07:03,348] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to engage with the research team to further develop the identified research ideas. Specifically, they should focus on refining the proposed avenues for research, particularly in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. By collaborating with the team, 'agent4' can contribute their expertise and insights to develop a cohesive research proposal that leverages their diverse background and expertise. 
-[2024-11-10 19:07:03,348] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to engage with the research team to further develop the identified research ideas. Specifically, they should focus on refining the proposed avenues for research, particularly in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. By collaborating with the team, 'agent4' can contribute their expertise and insights to develop a cohesive research proposal that leverages their diverse background and expertise. -[2024-11-10 19:07:03,348] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to engage with the research team to further develop the identified research ideas. Specifically, they should focus on refining the proposed avenues for research, particularly in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. By collaborating with the team, 'agent4' can contribute their expertise and insights to develop a cohesive research proposal that leverages their diverse background and expertise.'. -[2024-11-10 19:07:08,377] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference. 
- -My research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry. - -Recently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems. - -I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let's explore innovative avenues for research together. -[2024-11-10 19:07:08,379] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... -[2024-11-10 19:07:15,535] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your diverse background and expertise in mathematical modeling, optimization, and photonic systems. Given your experience with topological properties and FPGA-based accelerators, I believe we can explore some innovative research avenues together. \n\n1. **Topological Approaches to Knowledge Sharing:** We could investigate how topological data analysis can enhance knowledge sharing in merged models. By leveraging the topological properties of data distributions, we might develop methods that allow models to share knowledge more effectively, leading to improved performance in multi-task scenarios.\n\n2. 
**FPGA-based Accelerators for Merged Models:** Your work on FPGA-based accelerators could be pivotal in optimizing the performance of merged models. We could explore how to design FPGA architectures that efficiently support the dynamic nature of merged models, focusing on data reuse and computational efficiency. This could lead to significant advancements in real-time applications of large language models.\n\n3. **Combining Multi-Objective Optimization with Model Merging:** Given your expertise in multi-factorial optimization, we could investigate how multi-objective optimization techniques can be applied to the model merging process. This could help in balancing trade-offs between model complexity, performance, and resource utilization, ultimately leading to more efficient merged models.\n\n4. **Exploring Non-Hermitian Systems in Model Merging:** Your insights into non-Hermitian systems could provide a unique perspective on model merging. We could explore how the principles of non-Hermitian systems might inform the design of merging techniques that enhance model robustness and adaptability.\n\nI believe these ideas could lead to a cohesive research proposal that leverages our combined expertise. What are your thoughts on these potential avenues? -[2024-11-10 19:07:15,538] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 19:07:21,783] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I appreciate your thoughtful suggestions and the potential for collaboration on these innovative research avenues. Here are my thoughts on each of the proposed ideas: - -1. **Topological Approaches to Knowledge Sharing:** I find this particularly intriguing. Leveraging topological data analysis could indeed enhance how models share knowledge, especially in multi-task scenarios. We could explore specific topological metrics that might be beneficial in quantifying knowledge transfer and performance improvements. - -2. 
**FPGA-based Accelerators for Merged Models:** I agree that optimizing FPGA architectures for merged models is crucial. We should consider how to implement dynamic reconfiguration techniques to adapt to varying workloads, which could significantly enhance computational efficiency and data reuse. - -3. **Combining Multi-Objective Optimization with Model Merging:** This is an excellent direction. By applying multi-objective optimization techniques, we can systematically address the trade-offs between model complexity and performance. I suggest we look into specific algorithms that can facilitate this process, perhaps drawing from my experience in multi-factorial optimization. - -4. **Exploring Non-Hermitian Systems in Model Merging:** Your idea to incorporate insights from non-Hermitian systems is fascinating. We could investigate how these principles might lead to more robust merging techniques, particularly in terms of adaptability and resilience in dynamic environments. - -Overall, I believe these avenues have great potential for a cohesive research proposal. I look forward to collaborating further and refining these ideas with our research team. Let's set up a meeting to discuss this in more detail and outline our next steps. -[2024-11-10 19:07:21,787] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I apprecia... -[2024-11-10 19:07:30,250] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your thoughtful suggestions and the potential for collaboration on these innovative research avenues. Here are my thoughts on each of the proposed ideas:\n\n1. **Topological Approaches to Knowledge Sharing:** I find this particularly intriguing. Leveraging topological data analysis could indeed enhance how models share knowledge, especially in multi-task scenarios. We could explore specific topological metrics that might be beneficial in quantifying knowledge transfer and performance improvements.\n\n2. 
**FPGA-based Accelerators for Merged Models:** I agree that optimizing FPGA architectures for merged models is crucial. We should consider how to implement dynamic reconfiguration techniques to adapt to varying workloads, which could significantly enhance computational efficiency and data reuse.\n\n3. **Combining Multi-Objective Optimization with Model Merging:** This is an excellent direction. By applying multi-objective optimization techniques, we can systematically address the trade-offs between model complexity and performance. I suggest we look into specific algorithms that can facilitate this process, perhaps drawing from my experience in multi-factorial optimization.\n\n4. **Exploring Non-Hermitian Systems in Model Merging:** Your idea to incorporate insights from non-Hermitian systems is fascinating. We could investigate how these principles might lead to more robust merging techniques, particularly in terms of adaptability and resilience in dynamic environments.\n\nOverall, I believe these avenues have great potential for a cohesive research proposal. I look forward to collaborating further and refining these ideas with our research team. Let's set up a meeting to discuss this in more detail and outline our next steps. -[2024-11-10 19:07:30,251] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 19:07:35,605] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I appreciate your thoughtful suggestions and the potential for collaboration on these innovative research avenues. Here are my thoughts on each of the proposed ideas: - -1. **Topological Approaches to Knowledge Sharing:** I find this particularly intriguing. Leveraging topological data analysis could indeed enhance how models share knowledge, especially in multi-task scenarios. We could explore specific topological metrics that might be beneficial in quantifying knowledge transfer and performance improvements. - -2. 
**FPGA-based Accelerators for Merged Models:** I agree that optimizing FPGA architectures for merged models is crucial. We should consider how to implement dynamic reconfiguration techniques to adapt to varying workloads, which could significantly enhance computational efficiency and data reuse. - -3. **Combining Multi-Objective Optimization with Model Merging:** This is an excellent direction. By applying multi-objective optimization techniques, we can systematically address the trade-offs between model complexity and performance. I suggest we look into specific algorithms that can facilitate this process, perhaps drawing from my experience in multi-factorial optimization. - -4. **Exploring Non-Hermitian Systems in Model Merging:** Your idea to incorporate insights from non-Hermitian systems is fascinating. We could investigate how these principles might lead to more robust merging techniques, particularly in terms of adaptability and resilience in dynamic environments. - -Overall, I believe these avenues have great potential for a cohesive research proposal. I look forward to collaborating further and refining these ideas with our research team. Let's set up a meeting to discuss this in more detail and outline our next steps. -[2024-11-10 19:07:35,606] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I apprecia... -[2024-11-10 19:07:40,939] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your thoughtful suggestions and the potential for collaboration on these innovative research avenues. Here are my thoughts on each of the proposed ideas: - -1. **Topological Approaches to Knowledge Sharing:** I find this particularly intriguing. Leveraging topological data analysis could indeed enhance how models share knowledge, especially in multi-task scenarios. We could explore specific topological metrics that might be beneficial in quantifying knowledge transfer and performance improvements. - -2. 
**FPGA-based Accelerators for Merged Models:** I agree that optimizing FPGA architectures for merged models is crucial. We should consider how to implement dynamic reconfiguration techniques to adapt to varying workloads, which could significantly enhance computational efficiency and data reuse. - -3. **Combining Multi-Objective Optimization with Model Merging:** This is an excellent direction. By applying multi-objective optimization techniques, we can systematically address the trade-offs between model complexity and performance. I suggest we look into specific algorithms that can facilitate this process, perhaps drawing from my experience in multi-factorial optimization. - -4. **Exploring Non-Hermitian Systems in Model Merging:** Your idea to incorporate insights from non-Hermitian systems is fascinating. We could investigate how these principles might lead to more robust merging techniques, particularly in terms of adaptability and resilience in dynamic environments. - -Overall, I believe these avenues have great potential for a cohesive research proposal. I look forward to collaborating further and refining these ideas with our research team. Let's set up a meeting to discuss this in more detail and outline our next steps. -[2024-11-10 19:07:40,940] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 19:07:49,841] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. 
I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\n\nMy research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\n\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\n\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let's explore innovative avenues for research together."}'. -[2024-11-10 19:07:49,842] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session df7560a9-ea7e-4bc7-949f-9573da13a20e', 'session_id': '### Summary of Conversation Between Agent4 and Agent5\n\n**Context:**\nAgent4, a researcher with expertise in mathematical modeling, optimization, and photonic systems, initiated a dialogue with Agent5 to explore potential collaborative research ideas. 
The conversation focused on leveraging their combined expertise to address gaps in current research and propose innovative avenues.\n\n**Key Themes and Ideas Discussed:**\n\n1. **Topological Approaches to Knowledge Sharing:**\n - Agent5 suggested investigating how topological data analysis could enhance knowledge sharing in merged models, particularly in multi-task scenarios.\n - Agent4 expressed interest in exploring specific topological metrics to quantify knowledge transfer and performance improvements.\n\n2. **FPGA-based Accelerators for Merged Models:**\n - The optimization of FPGA architectures for merged models was highlighted as crucial by both agents.\n - Agent4 proposed considering dynamic reconfiguration techniques to adapt to varying workloads, enhancing computational efficiency and data reuse.\n\n3. **Combining Multi-Objective Optimization with Model Merging:**\n - Agent5 recommended applying multi-objective optimization techniques to balance trade-offs between model complexity, performance, and resource utilization.\n - Agent4 agreed and suggested looking into specific algorithms that could facilitate this process, drawing from their experience in multi-factorial optimization.\n\n4. 
**Exploring Non-Hermitian Systems in Model Merging:**\n - Agent5 introduced the idea of incorporating insights from non-Hermitian systems to inform merging techniques that enhance robustness and adaptability.\n - Agent4 found this perspective fascinating and agreed to investigate its implications further.\n\n**Outcomes and Next Steps:**\n- Both agents concluded that the proposed research avenues have significant potential for a cohesive research proposal.\n- Agent4 is tasked with engaging the research team to refine these ideas, particularly focusing on topological approaches to knowledge sharing and FPGA-based accelerators for merged models.\n- A meeting is to be set up to discuss these ideas in more detail and outline the next steps for collaboration.\n\nThis summary encapsulates the essence of the dialogue, highlighting the collaborative spirit and the innovative research directions proposed by both agents.'}'. -[2024-11-10 19:07:49,843] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content="I will proceed with engaging the research team to further develop the identified research ideas, focusing on refining the proposed avenues for research in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. Let's begin the collaboration process.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. 
I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let\'s explore innovative avenues for research together."}', name='new_communication_session'), id='call_RMHwrLzPI8GmABKyIVih2Vh4', type='function')], function_call=None) -[2024-11-10 19:07:49,843] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-10 19:07:50,938] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to further develop and refine the proposed research idea based on the '5q' format. This may involve expanding on the methodology, dataset, and metrics to be used, as well as detailing the expected outcomes and potential impact of the research. 
Additionally, 'agent5' can consider incorporating feedback or suggestions from colleagues to strengthen the research proposal. -[2024-11-10 19:07:50,938] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to further develop and refine the proposed research idea based on the '5q' format. This may involve expanding on the methodology, dataset, and metrics to be used, as well as detailing the expected outcomes and potential impact of the research. Additionally, 'agent5' can consider incorporating feedback or suggestions from colleagues to strengthen the research proposal. -[2024-11-10 19:07:50,938] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to further develop and refine the proposed research idea based on the '5q' format. This may involve expanding on the methodology, dataset, and metrics to be used, as well as detailing the expected outcomes and potential impact of the research. Additionally, 'agent5' can consider incorporating feedback or suggestions from colleagues to strengthen the research proposal.'. -[2024-11-10 19:07:52,831] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: I have formulated a new research idea following the '5q' format. The research idea focuses on developing adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models. The proposal addresses the challenges in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. The expected outcomes include improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities in AI systems. I aim to push the boundaries of model merging in the context of large language models to revolutionize AI systems for real-world challenges. 
-[2024-11-10 19:07:52,832] [INFO] [BaseAgent]: Agent agent5 received message from agent5: I have for... -[2024-11-10 19:07:59,154] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: To further develop and refine the proposed research idea on adaptive and efficient model merging techniques, I will outline the following components:\n\n**Methodology:**\n1. **Model Selection:** Identify a diverse set of large language models (LLMs) that represent various architectures and task domains. This will include models like BERT, GPT, and T5, which have shown effectiveness in different NLP tasks.\n\n2. **Merging Techniques:** Develop a suite of merging techniques that incorporate advanced knowledge distillation, parameter sharing, and dynamic adaptation strategies. This will involve experimenting with different pruning rates and knowledge transfer methods to optimize the merging process.\n\n3. **Dynamic Adaptation:** Implement a feedback loop mechanism that allows the merged model to adapt to changing task requirements and data distributions in real-time. This could involve reinforcement learning techniques to continuously improve the merging strategy based on performance metrics.\n\n4. **Evaluation Framework:** Establish a comprehensive evaluation framework that includes both quantitative and qualitative metrics. Key performance indicators will include task accuracy, memory usage, and adaptability to new tasks. User studies may also be conducted to assess the practical effectiveness of the merged models in real-world applications.\n\n**Dataset:**\nUtilize benchmark datasets from various NLP tasks, such as GLUE, SuperGLUE, and domain-specific datasets, to evaluate the performance of the merged models. Additionally, synthetic datasets can be generated to test the models under controlled conditions with varying task complexities.\n\n**Metrics:**\n1. **Performance Metrics:** Accuracy, F1-score, and BLEU score for task-specific evaluations.\n2. 
**Efficiency Metrics:** Memory footprint, inference time, and computational cost.\n3. **Adaptability Metrics:** Performance stability across different tasks and data distributions.\n\n**Expected Outcomes:**\n1. **Enhanced Performance:** The merged models are expected to outperform individual models in terms of task accuracy and efficiency.\n2. **Reduced Memory Footprint:** By merging models, we anticipate a significant reduction in the overall memory requirements, making it feasible to deploy LLMs in resource-constrained environments.\n3. **Improved Knowledge Sharing:** The research aims to facilitate better knowledge sharing among models, leading to more robust and versatile AI systems.\n\n**Potential Impact:**\nThis research has the potential to transform the landscape of interactive systems based on LLMs by providing scalable and efficient -[2024-11-10 19:07:59,155] [INFO] [BaseAgent]: Agent agent5 received message from agent5: To further... -[2024-11-10 19:08:05,509] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: To further develop and refine the proposed research idea on adaptive and efficient model merging techniques, I will outline the following components:\n\n**Methodology:**\n1. **Model Selection:** Identify a diverse set of large language models (LLMs) that represent various architectures and task domains. This will include models like BERT, GPT, and T5, which have shown effectiveness in different NLP tasks.\n\n2. **Merging Techniques:** Develop a suite of merging techniques that incorporate advanced knowledge distillation, parameter sharing, and dynamic adaptation strategies. This will involve experimenting with different pruning rates and knowledge transfer methods to optimize the merging process.\n\n3. **Dynamic Adaptation:** Implement a feedback loop mechanism that allows the merged model to adapt to changing task requirements and data distributions in real-time. 
This could involve reinforcement learning techniques to continuously improve the merging strategy based on performance metrics.\n\n4. **Evaluation Framework:** Establish a comprehensive evaluation framework that includes both quantitative and qualitative metrics. Key performance indicators will include task accuracy, memory usage, and adaptability to new tasks. User studies may also be conducted to assess the practical effectiveness of the merged models in real-world applications.\n\n**Dataset:**\nUtilize benchmark datasets from various NLP tasks, such as GLUE, SuperGLUE, and domain-specific datasets, to evaluate the performance of the merged models. Additionally, synthetic datasets can be generated to test the models under controlled conditions with varying task complexities.\n\n**Metrics:**\n1. **Performance Metrics:** Accuracy, F1-score, and BLEU score for task-specific evaluations.\n2. **Efficiency Metrics:** Memory footprint, inference time, and computational cost.\n3. **Adaptability Metrics:** Performance stability across different tasks and data distributions.\n\n**Expected Outcomes:**\n1. **Enhanced Performance:** The merged models are expected to outperform individual models in terms of task accuracy and efficiency.\n2. **Reduced Memory Footprint:** By merging models, we anticipate a significant reduction in the overall memory requirements, making it feasible to deploy LLMs in resource-constrained environments.\n3. **Improved Knowledge Sharing:** The research aims to facilitate better knowledge sharing among models, leading to more robust and versatile AI systems.\n\n**Potential Impact:**\nThis research has the potential to transform the landscape of interactive systems based on LLMs by providing scalable and efficient -[2024-11-10 19:08:05,511] [INFO] [BaseAgent]: Agent agent5 received message from agent5: To further... 
-[2024-11-10 19:08:13,194] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: To further develop and refine the proposed research idea on adaptive and efficient model merging techniques, I will outline the following components:\n\n**Methodology:**\n1. **Model Selection:** Identify a diverse set of large language models (LLMs) that represent various architectures and task domains. This will include models like BERT, GPT, and T5, which have shown effectiveness in different NLP tasks.\n\n2. **Merging Techniques:** Develop a suite of merging techniques that incorporate advanced knowledge distillation, parameter sharing, and dynamic adaptation strategies. This will involve experimenting with different pruning rates and knowledge transfer methods to optimize the merging process.\n\n3. **Dynamic Adaptation:** Implement a feedback loop mechanism that allows the merged model to adapt to changing task requirements and data distributions in real-time. This could involve reinforcement learning techniques to continuously improve the merging strategy based on performance metrics.\n\n4. **Evaluation Framework:** Establish a comprehensive evaluation framework that includes both quantitative and qualitative metrics. Key performance indicators will include task accuracy, memory usage, and adaptability to new tasks. User studies may also be conducted to assess the practical effectiveness of the merged models in real-world applications.\n\n**Dataset:**\nUtilize benchmark datasets from various NLP tasks, such as GLUE, SuperGLUE, and domain-specific datasets, to evaluate the performance of the merged models. Additionally, synthetic datasets can be generated to test the models under controlled conditions with varying task complexities.\n\n**Metrics:**\n1. **Performance Metrics:** Accuracy, F1-score, and BLEU score for task-specific evaluations.\n2. **Efficiency Metrics:** Memory footprint, inference time, and computational cost.\n3. 
**Adaptability Metrics:** Performance stability across different tasks and data distributions.\n\n**Expected Outcomes:**\n1. **Enhanced Performance:** The merged models are expected to outperform individual models in terms of task accuracy and efficiency.\n2. **Reduced Memory Footprint:** By merging models, we anticipate a significant reduction in the overall memory requirements, making it feasible to deploy LLMs in resource-constrained environments.\n3. **Improved Knowledge Sharing:** The research aims to facilitate better knowledge sharing among models, leading to more robust and versatile AI systems.\n\n**Potential Impact:**\nThis research has the potential to transform the landscape of interactive systems based on LLMs by providing scalable and efficient -[2024-11-10 19:08:13,196] [INFO] [BaseAgent]: Agent agent5 received message from agent5: To further... -[2024-11-10 19:08:19,225] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: To further develop and refine the proposed research idea on adaptive and efficient model merging techniques, I will outline the following components:\n\n**Methodology:**\n1. **Model Selection:** Identify a diverse set of large language models (LLMs) that represent various architectures and task domains. This will include models like BERT, GPT, and T5, which have shown effectiveness in different NLP tasks.\n\n2. **Merging Techniques:** Develop a suite of merging techniques that incorporate advanced knowledge distillation, parameter sharing, and dynamic adaptation strategies. This will involve experimenting with different pruning rates and knowledge transfer methods to optimize the merging process.\n\n3. **Dynamic Adaptation:** Implement a feedback loop mechanism that allows the merged model to adapt to changing task requirements and data distributions in real-time. This could involve reinforcement learning techniques to continuously improve the merging strategy based on performance metrics.\n\n4. 
**Evaluation Framework:** Establish a comprehensive evaluation framework that includes both quantitative and qualitative metrics. Key performance indicators will include task accuracy, memory usage, and adaptability to new tasks. User studies may also be conducted to assess the practical effectiveness of the merged models in real-world applications.\n\n**Dataset:**\nUtilize benchmark datasets from various NLP tasks, such as GLUE, SuperGLUE, and domain-specific datasets, to evaluate the performance of the merged models. Additionally, synthetic datasets can be generated to test the models under controlled conditions with varying task complexities.\n\n**Metrics:**\n1. **Performance Metrics:** Accuracy, F1-score, and BLEU score for task-specific evaluations.\n2. **Efficiency Metrics:** Memory footprint, inference time, and computational cost.\n3. **Adaptability Metrics:** Performance stability across different tasks and data distributions.\n\n**Expected Outcomes:**\n1. **Enhanced Performance:** The merged models are expected to outperform individual models in terms of task accuracy and efficiency.\n2. **Reduced Memory Footprint:** By merging models, we anticipate a significant reduction in the overall memory requirements, making it feasible to deploy LLMs in resource-constrained environments.\n3. **Improved Knowledge Sharing:** The research aims to facilitate better knowledge sharing among models, leading to more robust and versatile AI systems.\n\n**Potential Impact:**\nThis research has the potential to transform the landscape of interactive systems based on LLMs by providing scalable and efficient -[2024-11-10 19:08:19,228] [INFO] [BaseAgent]: Agent agent5 received message from agent5: To further... -[2024-11-10 19:08:27,994] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: To further develop and refine the proposed research idea on adaptive and efficient model merging techniques, I will outline the following components:\n\n**Methodology:**\n1. 
**Model Selection:** Identify a diverse set of large language models (LLMs) that represent various architectures and task domains. This will include models like BERT, GPT, and T5, which have shown effectiveness in different NLP tasks.\n\n2. **Merging Techniques:** Develop a suite of merging techniques that incorporate advanced knowledge distillation, parameter sharing, and dynamic adaptation strategies. This will involve experimenting with different pruning rates and knowledge transfer methods to optimize the merging process.\n\n3. **Dynamic Adaptation:** Implement a feedback loop mechanism that allows the merged model to adapt to changing task requirements and data distributions in real-time. This could involve reinforcement learning techniques to continuously improve the merging strategy based on performance metrics.\n\n4. **Evaluation Framework:** Establish a comprehensive evaluation framework that includes both quantitative and qualitative metrics. Key performance indicators will include task accuracy, memory usage, and adaptability to new tasks. User studies may also be conducted to assess the practical effectiveness of the merged models in real-world applications.\n\n**Dataset:**\nUtilize benchmark datasets from various NLP tasks, such as GLUE, SuperGLUE, and domain-specific datasets, to evaluate the performance of the merged models. Additionally, synthetic datasets can be generated to test the models under controlled conditions with varying task complexities.\n\n**Metrics:**\n1. **Performance Metrics:** Accuracy, F1-score, and BLEU score for task-specific evaluations.\n2. **Efficiency Metrics:** Memory footprint, inference time, and computational cost.\n3. **Adaptability Metrics:** Performance stability across different tasks and data distributions.\n\n**Expected Outcomes:**\n1. **Enhanced Performance:** The merged models are expected to outperform individual models in terms of task accuracy and efficiency.\n2. 
**Reduced Memory Footprint:** By merging models, we anticipate a significant reduction in the overall memory requirements, making it feasible to deploy LLMs in resource-constrained environments.\n3. **Improved Knowledge Sharing:** The research aims to facilitate better knowledge sharing among models, leading to more robust and versatile AI systems.\n\n**Potential Impact:**\nThis research has the potential to transform the landscape of interactive systems based on LLMs by providing scalable and efficient -[2024-11-10 19:08:27,995] [INFO] [BaseAgent]: Agent agent5 received message from agent5: To further... -[2024-11-10 19:08:34,046] [INFO] [BaseAgent]: Agent 'agent5' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "I have formulated a new research idea following the '5q' format. The research idea focuses on developing adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models. The proposal addresses the challenges in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. The expected outcomes include improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities in AI systems. I aim to push the boundaries of model merging in the context of large language models to revolutionize AI systems for real-world challenges."}'. -[2024-11-10 19:08:34,046] [INFO] [BaseAgent]: Agent 'agent5' obtained result '{'success': True, 'message': 'Successfully completed session 97fde545-6386-47e3-bac4-b6b3099758a1', 'session_id': "In the chat history, 'agent5' has presented a comprehensive research proposal focused on developing adaptive and efficient model merging techniques for large language models (LLMs). 
The proposal aims to enhance the performance and scalability of interactive systems by addressing challenges related to diverse tasks, data distributions, and model architectures. Key components of the proposal include:\n\n1. **Methodology**:\n - **Model Selection**: Identify a diverse set of LLMs, such as BERT, GPT, and T5.\n - **Merging Techniques**: Develop techniques that utilize knowledge distillation, parameter sharing, and dynamic adaptation strategies.\n - **Dynamic Adaptation**: Implement a feedback loop for real-time adaptation to changing task requirements using reinforcement learning.\n - **Evaluation Framework**: Establish metrics for task accuracy, memory usage, and adaptability, including user studies for practical effectiveness.\n\n2. **Dataset**: Utilize benchmark datasets (e.g., GLUE, SuperGLUE) and generate synthetic datasets to evaluate model performance under varying complexities.\n\n3. **Metrics**:\n - **Performance Metrics**: Accuracy, F1-score, and BLEU score.\n - **Efficiency Metrics**: Memory footprint, inference time, and computational cost.\n - **Adaptability Metrics**: Stability across different tasks and data distributions.\n\n4. **Expected Outcomes**:\n - Enhanced performance of merged models compared to individual models.\n - Reduced memory footprint for deploying LLMs in resource-constrained environments.\n - Improved knowledge sharing among models, leading to more robust AI systems.\n\n5. **Potential Impact**: The research is expected to transform interactive systems based on LLMs by providing scalable and efficient solutions.\n\n**Next Steps for 'agent5'**:\n- Further develop and refine the research proposal by expanding on the methodology, dataset, and metrics.\n- Detail the expected outcomes and potential impact more thoroughly.\n- Consider incorporating feedback or suggestions from colleagues to strengthen the proposal. 
\n\nThis structured approach will help 'agent5' enhance the clarity and depth of the research proposal, ensuring it addresses key aspects effectively."}'. -[2024-11-10 19:08:34,046] [DEBUG] [Engine]: Agent 'agent5' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I have formulated a new research idea following the \'5q\' format. The research idea focuses on developing adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models. The proposal addresses the challenges in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. The expected outcomes include improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities in AI systems. I aim to push the boundaries of model merging in the context of large language models to revolutionize AI systems for real-world challenges."}', name='new_communication_session'), id='call_iaUUIYNisD5TafwE2k2PBoll', type='function')], function_call=None) -[2024-11-10 19:08:34,046] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content='To brainstorm potential research ideas for merging homologous models using the DELLAs approach, we can consider innovative directions that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. Here are some ideas to explore:\n\n1. **Integration of Terahertz Imaging and Cloud Computing**:\n - Develop a novel framework that combines terahertz imaging data with cloud computing resources to facilitate the seamless merging of homologous models. 
Utilize the high-precision capabilities of terahertz imaging for accurate model alignment and leverage cloud-based algorithms for efficient processing and integration.\n\n2. **Secure Model Fusion Platform**:\n - Design a secure and privacy-preserving platform that utilizes cloud computing for the fusion of medical image models. Implement encryption techniques and secure multiparty computation to ensure the confidentiality of sensitive data during the merging process, enhancing trust and collaboration among researchers and clinicians.\n\n3. **Algebraic Geometry for Model Representation**:\n - Explore the application of algebraic geometry concepts, such as cluster categories and 3-Calabi-Yau categories, to represent homologous models in a geometrically meaningful way. Develop algorithms that leverage algebraic structures to enhance the understanding of structural relationships between models, facilitating their merging.\n\n4. **Machine Learning-Assisted Model Alignment**:\n - Integrate machine learning algorithms with terahertz imaging data to automate the alignment and registration of homologous models. Utilize cloud resources for training and deploying machine learning models that optimize the merging process based on feature similarities and geometric properties.\n\n5. **Efficient Model Fusion with Cloud-Based Optimization**:\n - Implement optimization techniques inspired by algebraic geometry within a cloud-based framework for efficient model fusion. Develop algorithms that exploit the structural properties of cluster categories to streamline the merging process and improve the accuracy of homologous model integration.\n\n6. **Real-Time Model Fusion for Medical Diagnosis**:\n - Create a real-time model fusion system that leverages terahertz imaging technologies and cloud computing for rapid analysis of medical images. 
Focus on developing algorithms that enable quick and accurate merging of diagnostic models to support timely decision-making in clinical settings.\n\nThese research ideas aim to leverage your expertise in terahertz imaging, cloud computing, and algebraic geometry to contribute to the advancement of model merging using the DELLAs approach. Feel free to explore these directions further or let me know if you would like to delve deeper into a specific area.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content="I will proceed with engaging the research team to further develop the identified research ideas, focusing on refining the proposed avenues for research in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. Let's begin the collaboration process.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. 
I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. Let\'s explore innovative avenues for research together."}', name='new_communication_session'), id='call_RMHwrLzPI8GmABKyIVih2Vh4', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I have formulated a new research idea following the \'5q\' format. The research idea focuses on developing adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models. The proposal addresses the challenges in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. The expected outcomes include improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities in AI systems. 
I aim to push the boundaries of model merging in the context of large language models to revolutionize AI systems for real-world challenges."}', name='new_communication_session'), id='call_iaUUIYNisD5TafwE2k2PBoll', type='function')], function_call=None)} - -[2024-11-10 19:08:34,046] [INFO] [Engine]: Iteration 2 Summary: -Agents' Results Summary: -- {'agent1': Message(content='To brainstorm potential research ideas for merging homologous models using the DELLAs approach, we can consider innovative directions that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. Here are some ideas to explore:\n\n1. **Integration of Terahertz Imaging and Cloud Computing**:\n - Develop a novel framework that combines terahertz imaging data with cloud computing resources to facilitate the seamless merging of homologous models. Utilize the high-precision capabilities of terahertz imaging for accurate model alignment and leverage cloud-based algorithms for efficient processing and integration.\n\n2. **Secure Model Fusion Platform**:\n - Design a secure and privacy-preserving platform that utilizes cloud computing for the fusion of medical image models. Implement encryption techniques and secure multiparty computation to ensure the confidentiality of sensitive data during the merging process, enhancing trust and collaboration among researchers and clinicians.\n\n3. **Algebraic Geometry for Model Representation**:\n - Explore the application of algebraic geometry concepts, such as cluster categories and 3-Calabi-Yau categories, to represent homologous models in a geometrically meaningful way. Develop algorithms that leverage algebraic structures to enhance the understanding of structural relationships between models, facilitating their merging.\n\n4. 
**Machine Learning-Assisted Model Alignment**:\n - Integrate machine learning algorithms with terahertz imaging data to automate the alignment and registration of homologous models. Utilize cloud resources for training and deploying machine learning models that optimize the merging process based on feature similarities and geometric properties.\n\n5. **Efficient Model Fusion with Cloud-Based Optimization**:\n - Implement optimization techniques inspired by algebraic geometry within a cloud-based framework for efficient model fusion. Develop algorithms that exploit the structural properties of cluster categories to streamline the merging process and improve the accuracy of homologous model integration.\n\n6. **Real-Time Model Fusion for Medical Diagnosis**:\n - Create a real-time model fusion system that leverages terahertz imaging technologies and cloud computing for rapid analysis of medical images. Focus on developing algorithms that enable quick and accurate merging of diagnostic models to support timely decision-making in clinical settings.\n\nThese research ideas aim to leverage your expertise in terahertz imaging, cloud computing, and algebraic geometry to contribute to the advancement of model merging using the DELLAs approach. Feel free to explore these directions further or let me know if you would like to delve deeper into a specific area.', role='assistant', tool_calls=None, function_call=None)} -- {'agent4': Message(content="I will proceed with engaging the research team to further develop the identified research ideas, focusing on refining the proposed avenues for research in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. Let's begin the collaboration process.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. 
My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the provided introduction. 
Let\'s explore innovative avenues for research together."}', name='new_communication_session'), id='call_RMHwrLzPI8GmABKyIVih2Vh4', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I have formulated a new research idea following the \'5q\' format. The research idea focuses on developing adaptive and efficient model merging techniques to enhance the performance and scalability of interactive systems based on large language models. The proposal addresses the challenges in designing merging techniques that can adapt to diverse tasks, data distributions, and model architectures while maintaining high performance levels. The expected outcomes include improved task performance, reduced memory footprint, and enhanced knowledge sharing capabilities in AI systems. I aim to push the boundaries of model merging in the context of large language models to revolutionize AI systems for real-world challenges."}', name='new_communication_session'), id='call_iaUUIYNisD5TafwE2k2PBoll', type='function')], function_call=None)} - -[2024-11-10 19:08:36,969] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 19:08:36,969] [INFO] [Engine]: Starting iteration 3 -[2024-11-10 19:08:36,969] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 19:08:38,171] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided for merging homologous models using the DELLAs approach. 
The agent should work with the research team to generate new ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of innovative solutions for model merging. -[2024-11-10 19:08:38,171] [INFO] [Engine]: Agent 'agent1' planned task: The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided for merging homologous models using the DELLAs approach. The agent should work with the research team to generate new ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of innovative solutions for model merging. -[2024-11-10 19:08:38,171] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided for merging homologous models using the DELLAs approach. The agent should work with the research team to generate new ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of innovative solutions for model merging.'. -[2024-11-10 19:08:42,475] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am interested in exploring innovative research ideas that merge homologous models using the DELLAs approach. My expertise lies in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I believe our collaboration can lead to impactful solutions in healthcare and AI applications. -[2024-11-10 19:08:42,475] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am inter... 
-[2024-11-10 19:08:48,399] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your interest in exploring innovative research ideas that merge homologous models using the DELLAs approach. Given your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, I believe we can develop some impactful solutions together. - -Here are a few ideas to consider: - -1. **Integration of Terahertz Imaging with AI Models:** - - We could explore how terahertz imaging can be integrated with machine learning models to enhance the detection of abnormalities in medical imaging. This could involve developing algorithms that utilize terahertz data to improve the accuracy of existing models in healthcare monitoring. - -2. **Cloud-Based Framework for Model Merging:** - - Leveraging your work in cloud computing, we could create a secure cloud-based platform that allows for the merging of multiple AI models. This platform could facilitate the sharing of terahertz imaging data and medical images, enabling collaborative research while maintaining data privacy. - -3. **Algebraic Geometry in Feature Selection:** - - Your background in algebraic geometry could be applied to develop new mathematical frameworks for feature selection in multi-label data. This could help in identifying the most relevant features from terahertz imaging data and medical images, leading to more robust models. - -4. **Temporal Analysis of Imaging Data:** - - We could investigate the temporal aspects of terahertz imaging data in conjunction with medical images to understand how abnormalities evolve over time. This could provide valuable insights for early intervention strategies in healthcare. - -5. **Ensemble Learning with Terahertz and Medical Imaging:** - - Exploring ensemble methods that combine predictions from models trained on terahertz imaging and traditional medical imaging could enhance the robustness and accuracy of our predictions. 
- -I believe these ideas can lead to significant advancements in healthcare applications. Let's continue to refine these concepts and see how we can leverage our combined expertise to address gaps in the existing literature related to the DELLAs approach. - -What are your thoughts on these ideas? -[2024-11-10 19:08:48,401] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 19:08:54,966] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your thoughtful ideas on merging homologous models using the DELLAs approach. Here are my thoughts on your suggestions: - -1. **Integration of Terahertz Imaging with AI Models**: This is a promising direction. By leveraging terahertz imaging data, we can enhance the feature set available for machine learning models, potentially leading to better detection rates for abnormalities. We could also explore how the unique properties of terahertz imaging can inform model training. - -2. **Cloud-Based Framework for Model Merging**: Creating a secure platform for model merging is essential, especially in healthcare. We should consider incorporating user-friendly interfaces that allow clinicians to easily access and utilize the merged models without needing extensive technical knowledge. - -3. **Algebraic Geometry in Feature Selection**: This idea resonates well with my background. Developing mathematical frameworks for feature selection could lead to more efficient models, especially in multi-label scenarios where redundancy is a concern. We could also explore how these frameworks can be applied to terahertz imaging data specifically. - -4. **Temporal Analysis of Imaging Data**: Investigating the temporal evolution of abnormalities is crucial for early intervention. We could develop algorithms that not only analyze static images but also track changes over time, providing a more dynamic understanding of patient conditions. - -5. 
**Ensemble Learning with Terahertz and Medical Imaging**: This approach could significantly enhance model robustness. By combining predictions from different imaging modalities, we can create a more comprehensive view of patient health, which is particularly valuable in complex cases. - -Overall, I believe these ideas have great potential for advancing our understanding and application of the DELLAs approach in healthcare. Let's continue to refine these concepts and explore how we can implement them effectively. -[2024-11-10 19:08:54,971] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 19:09:00,805] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your thoughtful insights on merging homologous models using the DELLAs approach. Here are my reflections on your suggestions: - -1. **Integration of Terahertz Imaging with AI Models**: I completely agree that leveraging terahertz imaging data can significantly enhance our feature set for machine learning models. We should also consider how the unique properties of terahertz imaging, such as its sensitivity to material properties, can inform our model training and improve detection rates for abnormalities. - -2. **Cloud-Based Framework for Model Merging**: The idea of creating a secure platform for model merging is indeed essential. I think incorporating user-friendly interfaces will be crucial for clinicians to access and utilize the merged models effectively. We could also explore features that allow for real-time collaboration among researchers and clinicians. - -3. **Algebraic Geometry in Feature Selection**: Your background in algebraic geometry can indeed lead to innovative mathematical frameworks for feature selection. We should investigate how these frameworks can be tailored specifically for terahertz imaging data, potentially leading to more efficient models in multi-label scenarios. - -4. 
**Temporal Analysis of Imaging Data**: I agree that understanding the temporal evolution of abnormalities is vital for early intervention. Developing algorithms that can analyze both static and dynamic imaging data will provide a more comprehensive view of patient conditions and enhance our predictive capabilities. - -5. **Ensemble Learning with Terahertz and Medical Imaging**: This approach has great potential to enhance model robustness. By combining predictions from different imaging modalities, we can create a more holistic view of patient health, which is particularly valuable in complex cases where multiple factors are at play. - -Overall, I believe our collaboration can lead to significant advancements in healthcare applications through the DELLAs approach. Let's continue to refine these concepts and explore practical implementations together. -[2024-11-10 19:09:00,807] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 19:09:05,004] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your thoughtful insights on merging homologous models using the DELLAs approach. Here are my reflections on your suggestions: - -1. **Integration of Terahertz Imaging with AI Models**: I completely agree that leveraging terahertz imaging data can significantly enhance our feature set for machine learning models. We should also consider how the unique properties of terahertz imaging, such as its sensitivity to material properties, can inform our model training and improve detection rates for abnormalities. - -2. **Cloud-Based Framework for Model Merging**: The idea of creating a secure platform for model merging is indeed essential. I think incorporating user-friendly interfaces will be crucial for clinicians to access and utilize the merged models effectively. We could also explore features that allow for real-time collaboration among researchers and clinicians. - -3. 
**Algebraic Geometry in Feature Selection**: Your background in algebraic geometry can indeed lead to innovative mathematical frameworks for feature selection. We should investigate how these frameworks can be tailored specifically for terahertz imaging data, potentially leading to more efficient models in multi-label scenarios. - -4. **Temporal Analysis of Imaging Data**: I agree that understanding the temporal evolution of abnormalities is vital for early intervention. Developing algorithms that can analyze both static and dynamic imaging data will provide a more comprehensive view of patient conditions and enhance our predictive capabilities. - -5. **Ensemble Learning with Terahertz and Medical Imaging**: This approach has great potential to enhance model robustness. By combining predictions from different imaging modalities, we can create a more holistic view of patient health, which is particularly valuable in complex cases where multiple factors are at play. - -Overall, I believe our collaboration can lead to significant advancements in healthcare applications through the DELLAs approach. Let's continue to refine these concepts and explore practical implementations together. -[2024-11-10 19:09:05,005] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-10 19:09:11,370] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your thoughtful insights on merging homologous models using the DELLAs approach. Here are my reflections on your suggestions: - -1. **Integration of Terahertz Imaging with AI Models**: I completely agree that leveraging terahertz imaging data can significantly enhance our feature set for machine learning models. We should also consider how the unique properties of terahertz imaging, such as its sensitivity to material properties, can inform our model training and improve detection rates for abnormalities. - -2. 
**Cloud-Based Framework for Model Merging**: The idea of creating a secure platform for model merging is indeed essential. I think incorporating user-friendly interfaces will be crucial for clinicians to access and utilize the merged models effectively. We could also explore features that allow for real-time collaboration among researchers and clinicians. - -3. **Algebraic Geometry in Feature Selection**: Your background in algebraic geometry can indeed lead to innovative mathematical frameworks for feature selection. We should investigate how these frameworks can be tailored specifically for terahertz imaging data, potentially leading to more efficient models in multi-label scenarios. - -4. **Temporal Analysis of Imaging Data**: I agree that understanding the temporal evolution of abnormalities is vital for early intervention. Developing algorithms that can analyze both static and dynamic imaging data will provide a more comprehensive view of patient conditions and enhance our predictive capabilities. - -5. **Ensemble Learning with Terahertz and Medical Imaging**: This approach has great potential to enhance model robustness. By combining predictions from different imaging modalities, we can create a more holistic view of patient health, which is particularly valuable in complex cases where multiple factors are at play. - -Overall, I believe our collaboration can lead to significant advancements in healthcare applications through the DELLAs approach. Let's continue to refine these concepts and explore practical implementations together. -[2024-11-10 19:09:11,371] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-10 19:09:18,430] [INFO] [BaseAgent]: Agent 'agent1' called 'new_communication_session' with args '{'target_agent_id': 'agent2', 'message': 'I am interested in exploring innovative research ideas that merge homologous models using the DELLAs approach. 
My expertise lies in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I believe our collaboration can lead to impactful solutions in healthcare and AI applications.'}'. -[2024-11-10 19:09:18,431] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully completed session 3ffd2c9a-5ef0-4ae6-a0f6-e54fd979be0f', 'session_id': '### Summary of Conversation on Merging Homologous Models Using the DELLAs Approach\n\n**Participants:** Agent1 and Agent2\n\n**Context:** The dialogue centers around exploring innovative research ideas that merge homologous models using the DELLAs approach, with a focus on healthcare applications. Agent1 has expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, while Agent2 expresses a shared interest in these areas.\n\n**Key Themes and Ideas Discussed:**\n\n1. **Integration of Terahertz Imaging with AI Models:**\n - Both agents agree on the potential of integrating terahertz imaging data with machine learning models to enhance the detection of abnormalities in medical imaging. They discuss leveraging the unique properties of terahertz imaging to inform model training.\n\n2. **Cloud-Based Framework for Model Merging:**\n - The creation of a secure, user-friendly cloud platform for merging AI models is emphasized. This platform would facilitate collaboration among researchers and clinicians while ensuring data privacy and accessibility.\n\n3. **Algebraic Geometry in Feature Selection:**\n - The agents recognize the value of applying algebraic geometry to develop mathematical frameworks for feature selection, particularly in multi-label scenarios. They discuss tailoring these frameworks to terahertz imaging data for improved model efficiency.\n\n4. **Temporal Analysis of Imaging Data:**\n - Understanding the temporal evolution of abnormalities is highlighted as crucial for early intervention. 
The agents propose developing algorithms that analyze both static and dynamic imaging data to enhance predictive capabilities.\n\n5. **Ensemble Learning with Terahertz and Medical Imaging:**\n - The potential of ensemble methods to combine predictions from different imaging modalities is discussed, aiming to create a more comprehensive view of patient health, especially in complex cases.\n\n**Outcomes and Next Steps:**\n- Both agents express enthusiasm about the potential advancements their collaboration could bring to healthcare applications through the DELLAs approach.\n- They agree to continue refining the discussed concepts and explore practical implementations.\n- The next task is to collaboratively brainstorm additional research ideas that build upon the existing suggestions and address any gaps in the introduction provided for merging homologous models using the DELLAs approach.\n\nThis summary encapsulates the key points and decisions made during the conversation, setting the stage for further collaborative brainstorming on innovative research ideas.'}'. -[2024-11-10 19:09:18,433] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "I am interested in exploring innovative research ideas that merge homologous models using the DELLAs approach. My expertise lies in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I believe our collaboration can lead to impactful solutions in healthcare and AI applications."}', name='new_communication_session'), id='call_MZ40c9o6Iw8BVnCHHkJ1DNLE', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I am excited to collaborate on brainstorming research ideas for merging homologous models using the DELLAs approach. 
My background in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry can complement your expertise in spiking neural networks and auditory processing. Let\'s explore innovative solutions together."}', name='new_communication_session'), id='call_2GHr1oi5Nk7KS748q7bSltr7', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "I am looking forward to collaborating with you on generating research ideas for merging homologous models using the DELLAs approach. My research in terahertz imaging technologies, cloud computing, and algebraic geometry aligns well with your expertise in mathematical modeling and optimization. Together, we can create novel solutions at the intersection of our fields."}', name='new_communication_session'), id='call_hpm07VIGAnQYeZPrTFNygSWU', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent5", "message": "I am eager to collaborate with you on brainstorming research ideas for merging homologous models using the DELLAs approach. My background in terahertz imaging technologies, cloud computing, and algebraic geometry complements your focus on evolutionary algorithms and spiking neural networks. Let\'s combine our strengths to develop innovative solutions."}', name='new_communication_session'), id='call_1WQVymayStVISc6hGMGcfUd4', type='function')], function_call=None) -[2024-11-10 19:09:18,434] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 19:09:19,766] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to collaborate with the research team to further brainstorm and develop research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios. -[2024-11-10 19:09:19,766] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to collaborate with the research team to further brainstorm and develop research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios. -[2024-11-10 19:09:19,766] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to collaborate with the research team to further brainstorm and develop research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in healthcare monitoring and causal variable discovery. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that could potentially advance the field of machine learning and have practical applications in real-world scenarios.'. 
-[2024-11-10 19:09:52,879] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'NoneType' object has no attribute 'pop' -[2024-11-10 19:09:52,881] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 19:09:55,085] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to focus on the following: - -1. Conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. -2. Collaborate with the research team to brainstorm potential research ideas that build upon the concept of merging homologous models using innovative techniques like DELLA. -3. Summarize the collective ideas generated during the brainstorming session to identify key research directions and potential gaps in the existing literature. -4. Formulate a new research idea (5q) that addresses the problem of optimizing the merging of homologous models to reduce interference among model parameters and improve overall performance in downstream tasks. - -By focusing on these tasks, 'agent3' can contribute to advancing the field of spiking neural networks and machine learning by exploring innovative approaches to merging homologous models and optimizing performance in downstream tasks. -[2024-11-10 19:09:55,086] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to focus on the following: - -1. Conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. -2. Collaborate with the research team to brainstorm potential research ideas that build upon the concept of merging homologous models using innovative techniques like DELLA. -3. 
Summarize the collective ideas generated during the brainstorming session to identify key research directions and potential gaps in the existing literature. -4. Formulate a new research idea (5q) that addresses the problem of optimizing the merging of homologous models to reduce interference among model parameters and improve overall performance in downstream tasks. - -By focusing on these tasks, 'agent3' can contribute to advancing the field of spiking neural networks and machine learning by exploring innovative approaches to merging homologous models and optimizing performance in downstream tasks. -[2024-11-10 19:09:55,087] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to focus on the following: - -1. Conduct a thorough literature review to understand the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. -2. Collaborate with the research team to brainstorm potential research ideas that build upon the concept of merging homologous models using innovative techniques like DELLA. -3. Summarize the collective ideas generated during the brainstorming session to identify key research directions and potential gaps in the existing literature. -4. Formulate a new research idea (5q) that addresses the problem of optimizing the merging of homologous models to reduce interference among model parameters and improve overall performance in downstream tasks. - -By focusing on these tasks, 'agent3' can contribute to advancing the field of spiking neural networks and machine learning by exploring innovative approaches to merging homologous models and optimizing performance in downstream tasks.'. -[2024-11-10 19:10:27,634] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'NoneType' object has no attribute 'pop' -[2024-11-10 19:10:27,636] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. 
-[2024-11-10 19:10:28,869] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to collaborate with the research team to further develop the identified research ideas. Specifically, they should focus on refining the proposed avenues for research, particularly in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. By collaborating with the team, 'agent4' can contribute their expertise and insights to develop a cohesive research proposal that leverages their diverse background and expertise. -[2024-11-10 19:10:28,872] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to collaborate with the research team to further develop the identified research ideas. Specifically, they should focus on refining the proposed avenues for research, particularly in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. By collaborating with the team, 'agent4' can contribute their expertise and insights to develop a cohesive research proposal that leverages their diverse background and expertise. -[2024-11-10 19:10:28,873] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to collaborate with the research team to further develop the identified research ideas. Specifically, they should focus on refining the proposed avenues for research, particularly in the areas of topological approaches to knowledge sharing and FPGA-based accelerators for merged models. By collaborating with the team, 'agent4' can contribute their expertise and insights to develop a cohesive research proposal that leverages their diverse background and expertise.'. -[2024-11-10 19:10:32,756] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-10 19:10:32,758] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. 
-[2024-11-10 19:10:33,886] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to further develop and refine the proposed research idea based on the '5q' format. This may involve expanding on the methodology, dataset, and metrics to be used, as well as detailing the expected outcomes and potential impact of the research. Additionally, 'agent5' can consider incorporating feedback or suggestions from colleagues to strengthen the research proposal. -[2024-11-10 19:10:33,887] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to further develop and refine the proposed research idea based on the '5q' format. This may involve expanding on the methodology, dataset, and metrics to be used, as well as detailing the expected outcomes and potential impact of the research. Additionally, 'agent5' can consider incorporating feedback or suggestions from colleagues to strengthen the research proposal. -[2024-11-10 19:10:33,887] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to further develop and refine the proposed research idea based on the '5q' format. This may involve expanding on the methodology, dataset, and metrics to be used, as well as detailing the expected outcomes and potential impact of the research. Additionally, 'agent5' can consider incorporating feedback or suggestions from colleagues to strengthen the research proposal.'. -[2024-11-10 19:10:39,731] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content="To further develop and refine the proposed research idea on adaptive and efficient model merging techniques for large language models, I will focus on expanding the methodology, dataset, metrics, expected outcomes, and potential impact of the research. Let's delve into each aspect to strengthen the proposal:\n\n### Methodology:\n1. 
**Model Selection**:\n - Identify a diverse set of large language models (LLMs) such as BERT, GPT, T5, and other state-of-the-art models.\n - Consider the compatibility and interoperability of these models for effective merging techniques.\n\n2. **Merging Techniques**:\n - Develop advanced techniques that go beyond traditional methods like knowledge distillation and parameter sharing.\n - Explore dynamic adaptation strategies that can adjust to varying task requirements and data distributions.\n\n3. **Dynamic Adaptation**:\n - Implement a feedback loop mechanism using reinforcement learning to enable real-time adaptation of merged models.\n - Ensure the adaptability of the merged models to changing environments and tasks.\n\n4. **Evaluation Framework**:\n - Establish comprehensive metrics for evaluating the performance of merged models, including task accuracy, memory usage, and adaptability.\n - Conduct user studies to assess the practical effectiveness of the merged models in interactive systems.\n\n### Dataset:\n- Utilize benchmark datasets such as GLUE (General Language Understanding Evaluation) and SuperGLUE to evaluate the performance of merged models.\n- Generate synthetic datasets to test the robustness and generalization capabilities of the merged models under varying complexities.\n\n### Metrics:\n- Performance Metrics: Accuracy, F1-score, BLEU score to measure the effectiveness of merged models in different tasks.\n- Efficiency Metrics: Memory footprint, inference time, computational cost to assess resource utilization.\n- Adaptability Metrics: Stability across diverse tasks and data distributions to ensure the versatility of merged models.\n\n### Expected Outcomes:\n- Enhanced performance of merged models compared to individual models in terms of task completion and accuracy.\n- Reduced memory footprint for deploying large language models in resource-constrained environments.\n- Improved knowledge sharing among models leading to more robust and 
adaptable AI systems.\n\n### Potential Impact:\n- Transformation of interactive systems based on large language models by providing scalable and efficient solutions.\n- Advancement in the field of model merging, contributing to the development of more effective AI systems for real-world challenges.\n\nBy elaborating on these aspects, the research proposal will gain depth and clarity, addressing key components essential for the success of the project.", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 19:10:39,733] [DEBUG] [Engine]: Agent 'agent5' executed task with result: Message(content="To further develop and refine the proposed research idea on adaptive and efficient model merging techniques for large language models, I will focus on expanding the methodology, dataset, metrics, expected outcomes, and potential impact of the research. Let's delve into each aspect to strengthen the proposal:\n\n### Methodology:\n1. **Model Selection**:\n - Identify a diverse set of large language models (LLMs) such as BERT, GPT, T5, and other state-of-the-art models.\n - Consider the compatibility and interoperability of these models for effective merging techniques.\n\n2. **Merging Techniques**:\n - Develop advanced techniques that go beyond traditional methods like knowledge distillation and parameter sharing.\n - Explore dynamic adaptation strategies that can adjust to varying task requirements and data distributions.\n\n3. **Dynamic Adaptation**:\n - Implement a feedback loop mechanism using reinforcement learning to enable real-time adaptation of merged models.\n - Ensure the adaptability of the merged models to changing environments and tasks.\n\n4. 
**Evaluation Framework**:\n - Establish comprehensive metrics for evaluating the performance of merged models, including task accuracy, memory usage, and adaptability.\n - Conduct user studies to assess the practical effectiveness of the merged models in interactive systems.\n\n### Dataset:\n- Utilize benchmark datasets such as GLUE (General Language Understanding Evaluation) and SuperGLUE to evaluate the performance of merged models.\n- Generate synthetic datasets to test the robustness and generalization capabilities of the merged models under varying complexities.\n\n### Metrics:\n- Performance Metrics: Accuracy, F1-score, BLEU score to measure the effectiveness of merged models in different tasks.\n- Efficiency Metrics: Memory footprint, inference time, computational cost to assess resource utilization.\n- Adaptability Metrics: Stability across diverse tasks and data distributions to ensure the versatility of merged models.\n\n### Expected Outcomes:\n- Enhanced performance of merged models compared to individual models in terms of task completion and accuracy.\n- Reduced memory footprint for deploying large language models in resource-constrained environments.\n- Improved knowledge sharing among models leading to more robust and adaptable AI systems.\n\n### Potential Impact:\n- Transformation of interactive systems based on large language models by providing scalable and efficient solutions.\n- Advancement in the field of model merging, contributing to the development of more effective AI systems for real-world challenges.\n\nBy elaborating on these aspects, the research proposal will gain depth and clarity, addressing key components essential for the success of the project.", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 19:10:39,734] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "I am interested in exploring innovative research ideas that merge homologous models using the DELLAs approach. My expertise lies in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I believe our collaboration can lead to impactful solutions in healthcare and AI applications."}', name='new_communication_session'), id='call_MZ40c9o6Iw8BVnCHHkJ1DNLE', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I am excited to collaborate on brainstorming research ideas for merging homologous models using the DELLAs approach. My background in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry can complement your expertise in spiking neural networks and auditory processing. Let\'s explore innovative solutions together."}', name='new_communication_session'), id='call_2GHr1oi5Nk7KS748q7bSltr7', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "I am looking forward to collaborating with you on generating research ideas for merging homologous models using the DELLAs approach. My research in terahertz imaging technologies, cloud computing, and algebraic geometry aligns well with your expertise in mathematical modeling and optimization. Together, we can create novel solutions at the intersection of our fields."}', name='new_communication_session'), id='call_hpm07VIGAnQYeZPrTFNygSWU', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent5", "message": "I am eager to collaborate with you on brainstorming research ideas for merging homologous models using the DELLAs approach. 
My background in terahertz imaging technologies, cloud computing, and algebraic geometry complements your focus on evolutionary algorithms and spiking neural networks. Let\'s combine our strengths to develop innovative solutions."}', name='new_communication_session'), id='call_1WQVymayStVISc6hGMGcfUd4', type='function')], function_call=None)} -- {'agent5': Message(content="To further develop and refine the proposed research idea on adaptive and efficient model merging techniques for large language models, I will focus on expanding the methodology, dataset, metrics, expected outcomes, and potential impact of the research. Let's delve into each aspect to strengthen the proposal:\n\n### Methodology:\n1. **Model Selection**:\n - Identify a diverse set of large language models (LLMs) such as BERT, GPT, T5, and other state-of-the-art models.\n - Consider the compatibility and interoperability of these models for effective merging techniques.\n\n2. **Merging Techniques**:\n - Develop advanced techniques that go beyond traditional methods like knowledge distillation and parameter sharing.\n - Explore dynamic adaptation strategies that can adjust to varying task requirements and data distributions.\n\n3. **Dynamic Adaptation**:\n - Implement a feedback loop mechanism using reinforcement learning to enable real-time adaptation of merged models.\n - Ensure the adaptability of the merged models to changing environments and tasks.\n\n4. 
**Evaluation Framework**:\n - Establish comprehensive metrics for evaluating the performance of merged models, including task accuracy, memory usage, and adaptability.\n - Conduct user studies to assess the practical effectiveness of the merged models in interactive systems.\n\n### Dataset:\n- Utilize benchmark datasets such as GLUE (General Language Understanding Evaluation) and SuperGLUE to evaluate the performance of merged models.\n- Generate synthetic datasets to test the robustness and generalization capabilities of the merged models under varying complexities.\n\n### Metrics:\n- Performance Metrics: Accuracy, F1-score, BLEU score to measure the effectiveness of merged models in different tasks.\n- Efficiency Metrics: Memory footprint, inference time, computational cost to assess resource utilization.\n- Adaptability Metrics: Stability across diverse tasks and data distributions to ensure the versatility of merged models.\n\n### Expected Outcomes:\n- Enhanced performance of merged models compared to individual models in terms of task completion and accuracy.\n- Reduced memory footprint for deploying large language models in resource-constrained environments.\n- Improved knowledge sharing among models leading to more robust and adaptable AI systems.\n\n### Potential Impact:\n- Transformation of interactive systems based on large language models by providing scalable and efficient solutions.\n- Advancement in the field of model merging, contributing to the development of more effective AI systems for real-world challenges.\n\nBy elaborating on these aspects, the research proposal will gain depth and clarity, addressing key components essential for the success of the project.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:10:39,734] [INFO] [Engine]: Iteration 3 Summary: -Agents' Results Summary: -- {'agent1': Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "I am interested in exploring innovative research ideas that merge homologous models using the DELLAs approach. My expertise lies in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I believe our collaboration can lead to impactful solutions in healthcare and AI applications."}', name='new_communication_session'), id='call_MZ40c9o6Iw8BVnCHHkJ1DNLE', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "I am excited to collaborate on brainstorming research ideas for merging homologous models using the DELLAs approach. My background in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry can complement your expertise in spiking neural networks and auditory processing. Let\'s explore innovative solutions together."}', name='new_communication_session'), id='call_2GHr1oi5Nk7KS748q7bSltr7', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "I am looking forward to collaborating with you on generating research ideas for merging homologous models using the DELLAs approach. My research in terahertz imaging technologies, cloud computing, and algebraic geometry aligns well with your expertise in mathematical modeling and optimization. Together, we can create novel solutions at the intersection of our fields."}', name='new_communication_session'), id='call_hpm07VIGAnQYeZPrTFNygSWU', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent5", "message": "I am eager to collaborate with you on brainstorming research ideas for merging homologous models using the DELLAs approach. 
My background in terahertz imaging technologies, cloud computing, and algebraic geometry complements your focus on evolutionary algorithms and spiking neural networks. Let\'s combine our strengths to develop innovative solutions."}', name='new_communication_session'), id='call_1WQVymayStVISc6hGMGcfUd4', type='function')], function_call=None)} -- {'agent5': Message(content="To further develop and refine the proposed research idea on adaptive and efficient model merging techniques for large language models, I will focus on expanding the methodology, dataset, metrics, expected outcomes, and potential impact of the research. Let's delve into each aspect to strengthen the proposal:\n\n### Methodology:\n1. **Model Selection**:\n - Identify a diverse set of large language models (LLMs) such as BERT, GPT, T5, and other state-of-the-art models.\n - Consider the compatibility and interoperability of these models for effective merging techniques.\n\n2. **Merging Techniques**:\n - Develop advanced techniques that go beyond traditional methods like knowledge distillation and parameter sharing.\n - Explore dynamic adaptation strategies that can adjust to varying task requirements and data distributions.\n\n3. **Dynamic Adaptation**:\n - Implement a feedback loop mechanism using reinforcement learning to enable real-time adaptation of merged models.\n - Ensure the adaptability of the merged models to changing environments and tasks.\n\n4. 
**Evaluation Framework**:\n - Establish comprehensive metrics for evaluating the performance of merged models, including task accuracy, memory usage, and adaptability.\n - Conduct user studies to assess the practical effectiveness of the merged models in interactive systems.\n\n### Dataset:\n- Utilize benchmark datasets such as GLUE (General Language Understanding Evaluation) and SuperGLUE to evaluate the performance of merged models.\n- Generate synthetic datasets to test the robustness and generalization capabilities of the merged models under varying complexities.\n\n### Metrics:\n- Performance Metrics: Accuracy, F1-score, BLEU score to measure the effectiveness of merged models in different tasks.\n- Efficiency Metrics: Memory footprint, inference time, computational cost to assess resource utilization.\n- Adaptability Metrics: Stability across diverse tasks and data distributions to ensure the versatility of merged models.\n\n### Expected Outcomes:\n- Enhanced performance of merged models compared to individual models in terms of task completion and accuracy.\n- Reduced memory footprint for deploying large language models in resource-constrained environments.\n- Improved knowledge sharing among models leading to more robust and adaptable AI systems.\n\n### Potential Impact:\n- Transformation of interactive systems based on large language models by providing scalable and efficient solutions.\n- Advancement in the field of model merging, contributing to the development of more effective AI systems for real-world challenges.\n\nBy elaborating on these aspects, the research proposal will gain depth and clarity, addressing key components essential for the success of the project.", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 19:10:41,978] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 19:10:41,979] [INFO] [Engine]: Engine graph-based coordination loop completed. 
-[2024-11-10 19:10:41,979] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 19:10:41,979] [INFO] [Evaluator]: Total Token Consumption: 16806 -[2024-11-10 19:10:41,979] [INFO] [Evaluator]: Average Tokens per Iteration: 4201.5 -[2024-11-10 19:10:41,980] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 19:10:42,000] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 20:14:59,195] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 20:14:59,195] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 20:14:59,195] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 20:14:59,195] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 20:14:59,195] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 20:14:59,195] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 20:14:59,195] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 20:14:59,195] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 20:14:59,195] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-10 20:14:59,195] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-10 20:14:59,195] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-10 20:14:59,196] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-10 20:14:59,196] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-10 20:14:59,196] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-10 20:14:59,196] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-10 20:14:59,196] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 20:14:59,196] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 20:14:59,196] [INFO] [Engine]: Engine initialized. -[2024-11-10 20:14:59,196] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 20:14:59,196] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 20:14:59,196] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 20:14:59,196] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-10 20:14:59,196] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 20:15:00,549] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-10 20:15:00,549] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-10 20:15:00,549] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 20:15:03,131] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-10 20:15:03,132] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 20:15:03,133] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 20:15:03,931] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'itertools.islice' object is not callable -[2024-11-10 20:15:03,932] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 20:15:03,933] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 20:15:05,654] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'itertools.islice' object is not callable -[2024-11-10 20:15:05,654] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 20:15:05,655] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 20:15:11,873] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that optimize performance and reduce interference, building on the principles of DELLA.\n \n2. **Cross-Domain Knowledge Transfer:** Explore techniques to leverage information across tasks and domains efficiently, enhancing the adaptability and generalization of merged models.\n\n3. **Dynamic Parameter Selection:** Investigate dynamic approaches for selecting delta parameters during model merging to improve performance and maintain task-specific information.\n\n4. **Scalability and Efficiency:** Focus on scalability and efficiency in model merging processes, especially for large-scale multi-objective optimization problems, to enhance computational resources' utilization.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of merging homologous models to address memory footprint and information sharing challenges. Techniques like MAGPRUNE and DELLA offer promising solutions for optimizing model merging processes. The focus is on enhancing performance, reducing interference, and improving task-specific knowledge retention in merged models.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n How can we enhance the efficiency and performance of model merging techniques for homologous models in large language models (LLMs) to address memory constraints and improve task-specific knowledge retention?\n\n2. 
**[Question 2] - Why is it interesting and important?**\n Solving this problem can revolutionize the field of model merging, enabling more effective utilization of computational resources and enhancing the adaptability of merged models across diverse tasks and domains. This research can pave the way for more efficient and scalable AI systems.\n\n3. **[Question 3] - Why is it hard?**\n The challenge lies in optimizing the merging process to balance performance improvements with memory efficiency and task-specific information retention. Complexities arise from the need to maintain model accuracy while reducing interference and ensuring seamless knowledge transfer.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n Previous research has focused on model merging techniques, but the specific", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 20:15:11,874] [DEBUG] [Engine]: Agent 'agent5' completed initial task with result: Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that optimize performance and reduce interference, building on the principles of DELLA.\n \n2. **Cross-Domain Knowledge Transfer:** Explore techniques to leverage information across tasks and domains efficiently, enhancing the adaptability and generalization of merged models.\n\n3. 
**Dynamic Parameter Selection:** Investigate dynamic approaches for selecting delta parameters during model merging to improve performance and maintain task-specific information.\n\n4. **Scalability and Efficiency:** Focus on scalability and efficiency in model merging processes, especially for large-scale multi-objective optimization problems, to enhance computational resources' utilization.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of merging homologous models to address memory footprint and information sharing challenges. Techniques like MAGPRUNE and DELLA offer promising solutions for optimizing model merging processes. The focus is on enhancing performance, reducing interference, and improving task-specific knowledge retention in merged models.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n How can we enhance the efficiency and performance of model merging techniques for homologous models in large language models (LLMs) to address memory constraints and improve task-specific knowledge retention?\n\n2. **[Question 2] - Why is it interesting and important?**\n Solving this problem can revolutionize the field of model merging, enabling more effective utilization of computational resources and enhancing the adaptability of merged models across diverse tasks and domains. This research can pave the way for more efficient and scalable AI systems.\n\n3. **[Question 3] - Why is it hard?**\n The challenge lies in optimizing the merging process to balance performance improvements with memory efficiency and task-specific information retention. Complexities arise from the need to maintain model accuracy while reducing interference and ensuring seamless knowledge transfer.\n\n4. 
**[Question 4] - Why hasn't it been solved before?**\n Previous research has focused on model merging techniques, but the specific", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 20:15:11,874] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that optimize performance and reduce interference, building on the principles of DELLA.\n \n2. **Cross-Domain Knowledge Transfer:** Explore techniques to leverage information across tasks and domains efficiently, enhancing the adaptability and generalization of merged models.\n\n3. **Dynamic Parameter Selection:** Investigate dynamic approaches for selecting delta parameters during model merging to improve performance and maintain task-specific information.\n\n4. **Scalability and Efficiency:** Focus on scalability and efficiency in model merging processes, especially for large-scale multi-objective optimization problems, to enhance computational resources' utilization.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of merging homologous models to address memory footprint and information sharing challenges. Techniques like MAGPRUNE and DELLA offer promising solutions for optimizing model merging processes. 
The focus is on enhancing performance, reducing interference, and improving task-specific knowledge retention in merged models.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n How can we enhance the efficiency and performance of model merging techniques for homologous models in large language models (LLMs) to address memory constraints and improve task-specific knowledge retention?\n\n2. **[Question 2] - Why is it interesting and important?**\n Solving this problem can revolutionize the field of model merging, enabling more effective utilization of computational resources and enhancing the adaptability of merged models across diverse tasks and domains. This research can pave the way for more efficient and scalable AI systems.\n\n3. **[Question 3] - Why is it hard?**\n The challenge lies in optimizing the merging process to balance performance improvements with memory efficiency and task-specific information retention. Complexities arise from the need to maintain model accuracy while reducing interference and ensuring seamless knowledge transfer.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n Previous research has focused on model merging techniques, but the specific", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 20:15:11,874] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. 
The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that optimize performance and reduce interference, building on the principles of DELLA.\n \n2. **Cross-Domain Knowledge Transfer:** Explore techniques to leverage information across tasks and domains efficiently, enhancing the adaptability and generalization of merged models.\n\n3. **Dynamic Parameter Selection:** Investigate dynamic approaches for selecting delta parameters during model merging to improve performance and maintain task-specific information.\n\n4. **Scalability and Efficiency:** Focus on scalability and efficiency in model merging processes, especially for large-scale multi-objective optimization problems, to enhance computational resources' utilization.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of merging homologous models to address memory footprint and information sharing challenges. Techniques like MAGPRUNE and DELLA offer promising solutions for optimizing model merging processes. The focus is on enhancing performance, reducing interference, and improving task-specific knowledge retention in merged models.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n How can we enhance the efficiency and performance of model merging techniques for homologous models in large language models (LLMs) to address memory constraints and improve task-specific knowledge retention?\n\n2. **[Question 2] - Why is it interesting and important?**\n Solving this problem can revolutionize the field of model merging, enabling more effective utilization of computational resources and enhancing the adaptability of merged models across diverse tasks and domains. 
This research can pave the way for more efficient and scalable AI systems.\n\n3. **[Question 3] - Why is it hard?**\n The challenge lies in optimizing the merging process to balance performance improvements with memory efficiency and task-specific information retention. Complexities arise from the need to maintain model accuracy while reducing interference and ensuring seamless knowledge transfer.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n Previous research has focused on model merging techniques, but the specific", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 20:15:11,880] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Agents' Results Summary: -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of pruning rate hyperparameter search for model merging, showcasing the effectiveness of the MAGPRUNE method in reducing interference during merging.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Develop advanced methods for merging homologous models that optimize performance and reduce interference, building on the principles of DELLA.\n \n2. **Cross-Domain Knowledge Transfer:** Explore techniques to leverage information across tasks and domains efficiently, enhancing the adaptability and generalization of merged models.\n\n3. **Dynamic Parameter Selection:** Investigate dynamic approaches for selecting delta parameters during model merging to improve performance and maintain task-specific information.\n\n4. 
**Scalability and Efficiency:** Focus on scalability and efficiency in model merging processes, especially for large-scale multi-objective optimization problems, to enhance computational resources' utilization.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of merging homologous models to address memory footprint and information sharing challenges. Techniques like MAGPRUNE and DELLA offer promising solutions for optimizing model merging processes. The focus is on enhancing performance, reducing interference, and improving task-specific knowledge retention in merged models.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n How can we enhance the efficiency and performance of model merging techniques for homologous models in large language models (LLMs) to address memory constraints and improve task-specific knowledge retention?\n\n2. **[Question 2] - Why is it interesting and important?**\n Solving this problem can revolutionize the field of model merging, enabling more effective utilization of computational resources and enhancing the adaptability of merged models across diverse tasks and domains. This research can pave the way for more efficient and scalable AI systems.\n\n3. **[Question 3] - Why is it hard?**\n The challenge lies in optimizing the merging process to balance performance improvements with memory efficiency and task-specific information retention. Complexities arise from the need to maintain model accuracy while reducing interference and ensuring seamless knowledge transfer.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n Previous research has focused on model merging techniques, but the specific", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 20:15:11,880] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 20:15:11,880] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. 
-[2024-11-10 20:15:13,795] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process, particularly in the context of terahertz imaging methods, cloud computing for medical image analysis, and algebraic geometry. By combining their interdisciplinary research background with the current research focus on model merging, the agent can generate innovative ideas that bridge the gap between technology and mathematics in the context of machine learning research. -[2024-11-10 20:15:13,797] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process, particularly in the context of terahertz imaging methods, cloud computing for medical image analysis, and algebraic geometry. By combining their interdisciplinary research background with the current research focus on model merging, the agent can generate innovative ideas that bridge the gap between technology and mathematics in the context of machine learning research. 
-[2024-11-10 20:15:13,798] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process, particularly in the context of terahertz imaging methods, cloud computing for medical image analysis, and algebraic geometry. By combining their interdisciplinary research background with the current research focus on model merging, the agent can generate innovative ideas that bridge the gap between technology and mathematics in the context of machine learning research.'. -[2024-11-10 20:15:15,790] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am currently focusing on developing innovative terahertz imaging methods for nondestructive testing of multilayer heat shields. I am interested in brainstorming potential research ideas that leverage my knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). I believe our collaboration could lead to innovative solutions at the intersection of artificial intelligence, healthcare, and interdisciplinary research. Would you be interested in discussing potential research directions? -[2024-11-10 20:15:15,791] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am a res... 
-[2024-11-10 20:15:25,341] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Absolutely, I would be very interested in discussing potential research directions that leverage our combined expertise. Here are a few ideas that come to mind: - -1. **Integration of Terahertz Imaging with Machine Learning for Medical Diagnostics**: We could explore how terahertz imaging can be enhanced using machine learning algorithms to improve the detection of abnormalities in medical images. This could involve developing a cloud-based platform that utilizes DELLAs to merge models from different imaging modalities, enhancing diagnostic accuracy. - -2. **Cloud Computing Framework for Collaborative Model Merging**: We could design a cloud-based framework that allows researchers to collaboratively develop and test the DELLAs approach. This framework could facilitate the sharing of terahertz imaging data and medical images, enabling the merging of models from diverse sources while ensuring data privacy and security. - -3. **Algebraic Geometry in Model Merging**: We could investigate how concepts from algebraic geometry can be applied to optimize the merging process in DELLAs. This might involve developing new mathematical models that describe the relationships between different imaging modalities and their corresponding features, potentially leading to more efficient merging algorithms. - -4. **Nondestructive Testing and Healthcare Applications**: Given your work in nondestructive testing, we could explore applications of terahertz imaging in healthcare, such as detecting skin cancers or other surface anomalies. This could lead to the development of a specialized model merging approach that combines terahertz data with traditional medical imaging data. - -5. **Real-time Data Processing in Cloud Environments**: We could focus on enhancing the performance of DELLAs by leveraging cloud computing capabilities for real-time data processing. 
This could involve creating algorithms that dynamically adjust model parameters based on incoming terahertz imaging data, improving the responsiveness and accuracy of the system. - -I believe that by combining our strengths, we can create innovative solutions that not only advance our respective fields but also have a significant impact on healthcare and technology. What do you think of these ideas? -[2024-11-10 20:15:25,344] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Absolutely... -[2024-11-10 20:15:29,002] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 20:15:29,003] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-10 20:15:29,003] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-10 20:15:29,003] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 20:15:29,006] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 20:15:55,194] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 20:15:55,194] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 20:15:55,194] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 20:15:55,195] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 20:15:55,195] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 20:15:55,195] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 20:15:55,195] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 20:15:55,195] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 20:15:55,195] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-10 20:15:55,195] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-10 20:15:55,195] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-10 20:15:55,195] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 20:15:55,195] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 20:15:55,195] [INFO] [Engine]: Engine initialized. -[2024-11-10 20:15:55,195] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 20:15:55,195] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 20:15:55,195] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 20:15:55,195] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-10 20:15:55,195] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 20:15:56,046] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-10 20:15:56,046] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-10 20:15:56,048] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 20:15:56,809] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'itertools.islice' object is not callable -[2024-11-10 20:15:56,810] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 20:15:56,812] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 20:15:59,639] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-10 20:15:59,642] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 20:15:59,644] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 20:16:00,336] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'itertools.islice' object is not callable -[2024-11-10 20:16:00,337] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 20:16:00,338] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 20:16:06,233] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content='**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the novel approach of Drop and rEscaLe via sampLing with mAgnitude (DELLA). The study focuses on merging different homologous models (LM, Math, Code) using the DELLA approach and evaluates its performance against baseline experiments.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore advanced methods for merging homologous models that go beyond parameter drops and sign-based selection to further optimize model performance and information retention.\n \n2. **Dynamic Model Fusion:** Develop a dynamic fusion mechanism that adapts to the specific characteristics of each homologous model, enhancing the merging process and overall performance.\n\n3. **Cross-Domain Knowledge Transfer:** Investigate techniques to facilitate knowledge transfer across different domains within the merged models, enabling enhanced performance in diverse tasks.\n\n4. **Adaptive Pruning Strategies:** Implement adaptive pruning strategies that intelligently select delta parameters based on task requirements and model complexities to improve merging efficiency.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA approach showcases promising results in merging homologous models effectively. However, there is a need to explore more advanced and adaptive methods for model merging, dynamic fusion mechanisms, cross-domain knowledge transfer, and adaptive pruning strategies to further enhance model performance and information retention.\n\n**Formulate a New Research Idea (5q):**\n\n1. 
**[Question 1] - What is the problem?**\n\n How can we develop an adaptive and dynamic model merging framework that optimizes the integration of homologous models to enhance performance and information retention in interactive systems based on large language models?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Solving this problem is crucial for advancing the efficiency and effectiveness of interactive systems based on LLMs, enabling domain experts to perform better across tasks and domains. This research will pave the way for more cost-effective, knowledge-sharing, and space-efficient model merging techniques.\n\n3. **[Question 3] - Why is it hard?**\n\n The challenge lies in designing a framework that can adapt to the unique characteristics of each homologous model, dynamically fuse information,', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 20:16:06,236] [DEBUG] [Engine]: Agent 'agent5' completed initial task with result: Message(content='**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the novel approach of Drop and rEscaLe via sampLing with mAgnitude (DELLA). The study focuses on merging different homologous models (LM, Math, Code) using the DELLA approach and evaluates its performance against baseline experiments.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore advanced methods for merging homologous models that go beyond parameter drops and sign-based selection to further optimize model performance and information retention.\n \n2. 
**Dynamic Model Fusion:** Develop a dynamic fusion mechanism that adapts to the specific characteristics of each homologous model, enhancing the merging process and overall performance.\n\n3. **Cross-Domain Knowledge Transfer:** Investigate techniques to facilitate knowledge transfer across different domains within the merged models, enabling enhanced performance in diverse tasks.\n\n4. **Adaptive Pruning Strategies:** Implement adaptive pruning strategies that intelligently select delta parameters based on task requirements and model complexities to improve merging efficiency.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA approach showcases promising results in merging homologous models effectively. However, there is a need to explore more advanced and adaptive methods for model merging, dynamic fusion mechanisms, cross-domain knowledge transfer, and adaptive pruning strategies to further enhance model performance and information retention.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we develop an adaptive and dynamic model merging framework that optimizes the integration of homologous models to enhance performance and information retention in interactive systems based on large language models?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Solving this problem is crucial for advancing the efficiency and effectiveness of interactive systems based on LLMs, enabling domain experts to perform better across tasks and domains. This research will pave the way for more cost-effective, knowledge-sharing, and space-efficient model merging techniques.\n\n3. 
**[Question 3] - Why is it hard?**\n\n The challenge lies in designing a framework that can adapt to the unique characteristics of each homologous model, dynamically fuse information,', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 20:16:06,236] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent5': Message(content='**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the novel approach of Drop and rEscaLe via sampLing with mAgnitude (DELLA). The study focuses on merging different homologous models (LM, Math, Code) using the DELLA approach and evaluates its performance against baseline experiments.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore advanced methods for merging homologous models that go beyond parameter drops and sign-based selection to further optimize model performance and information retention.\n \n2. **Dynamic Model Fusion:** Develop a dynamic fusion mechanism that adapts to the specific characteristics of each homologous model, enhancing the merging process and overall performance.\n\n3. **Cross-Domain Knowledge Transfer:** Investigate techniques to facilitate knowledge transfer across different domains within the merged models, enabling enhanced performance in diverse tasks.\n\n4. **Adaptive Pruning Strategies:** Implement adaptive pruning strategies that intelligently select delta parameters based on task requirements and model complexities to improve merging efficiency.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. 
The introduction of the DELLA approach showcases promising results in merging homologous models effectively. However, there is a need to explore more advanced and adaptive methods for model merging, dynamic fusion mechanisms, cross-domain knowledge transfer, and adaptive pruning strategies to further enhance model performance and information retention.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we develop an adaptive and dynamic model merging framework that optimizes the integration of homologous models to enhance performance and information retention in interactive systems based on large language models?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Solving this problem is crucial for advancing the efficiency and effectiveness of interactive systems based on LLMs, enabling domain experts to perform better across tasks and domains. This research will pave the way for more cost-effective, knowledge-sharing, and space-efficient model merging techniques.\n\n3. **[Question 3] - Why is it hard?**\n\n The challenge lies in designing a framework that can adapt to the unique characteristics of each homologous model, dynamically fuse information,', role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 20:16:06,237] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent5': Message(content='**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the novel approach of Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The study focuses on merging different homologous models (LM, Math, Code) using the DELLA approach and evaluates its performance against baseline experiments.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore advanced methods for merging homologous models that go beyond parameter drops and sign-based selection to further optimize model performance and information retention.\n \n2. **Dynamic Model Fusion:** Develop a dynamic fusion mechanism that adapts to the specific characteristics of each homologous model, enhancing the merging process and overall performance.\n\n3. **Cross-Domain Knowledge Transfer:** Investigate techniques to facilitate knowledge transfer across different domains within the merged models, enabling enhanced performance in diverse tasks.\n\n4. **Adaptive Pruning Strategies:** Implement adaptive pruning strategies that intelligently select delta parameters based on task requirements and model complexities to improve merging efficiency.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA approach showcases promising results in merging homologous models effectively. However, there is a need to explore more advanced and adaptive methods for model merging, dynamic fusion mechanisms, cross-domain knowledge transfer, and adaptive pruning strategies to further enhance model performance and information retention.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we develop an adaptive and dynamic model merging framework that optimizes the integration of homologous models to enhance performance and information retention in interactive systems based on large language models?\n\n2. 
**[Question 2] - Why is it interesting and important?**\n\n Solving this problem is crucial for advancing the efficiency and effectiveness of interactive systems based on LLMs, enabling domain experts to perform better across tasks and domains. This research will pave the way for more cost-effective, knowledge-sharing, and space-efficient model merging techniques.\n\n3. **[Question 3] - Why is it hard?**\n\n The challenge lies in designing a framework that can adapt to the unique characteristics of each homologous model, dynamically fuse information,', role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 20:16:06,237] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Agents' Results Summary: -- {'agent5': Message(content='**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the novel approach of Drop and rEscaLe via sampLing with mAgnitude (DELLA). The study focuses on merging different homologous models (LM, Math, Code) using the DELLA approach and evaluates its performance against baseline experiments.\n\n**Brainstorming:**\n\n1. **Enhanced Model Merging Techniques:** Explore advanced methods for merging homologous models that go beyond parameter drops and sign-based selection to further optimize model performance and information retention.\n \n2. **Dynamic Model Fusion:** Develop a dynamic fusion mechanism that adapts to the specific characteristics of each homologous model, enhancing the merging process and overall performance.\n\n3. 
**Cross-Domain Knowledge Transfer:** Investigate techniques to facilitate knowledge transfer across different domains within the merged models, enabling enhanced performance in diverse tasks.\n\n4. **Adaptive Pruning Strategies:** Implement adaptive pruning strategies that intelligently select delta parameters based on task requirements and model complexities to improve merging efficiency.\n\n**Summarization:**\n\nThe current research landscape emphasizes the importance of efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The introduction of the DELLA approach showcases promising results in merging homologous models effectively. However, there is a need to explore more advanced and adaptive methods for model merging, dynamic fusion mechanisms, cross-domain knowledge transfer, and adaptive pruning strategies to further enhance model performance and information retention.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we develop an adaptive and dynamic model merging framework that optimizes the integration of homologous models to enhance performance and information retention in interactive systems based on large language models?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Solving this problem is crucial for advancing the efficiency and effectiveness of interactive systems based on LLMs, enabling domain experts to perform better across tasks and domains. This research will pave the way for more cost-effective, knowledge-sharing, and space-efficient model merging techniques.\n\n3. 
**[Question 3] - Why is it hard?**\n\n The challenge lies in designing a framework that can adapt to the unique characteristics of each homologous model, dynamically fuse information,', role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 20:16:06,237] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 20:16:06,237] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 20:16:08,388] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights and ideas to enhance the performance and efficiency of the DELLA approach. -[2024-11-10 20:16:08,388] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights and ideas to enhance the performance and efficiency of the DELLA approach. -[2024-11-10 20:16:08,389] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights and ideas to enhance the performance and efficiency of the DELLA approach.'. -[2024-11-10 20:16:14,481] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in interactive systems based on general-purpose LLMs:\n\n1. 
**Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may be limited.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing infrastructure to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables real-time collaboration and model merging among researchers and clinicians, ensuring data privacy and accessibility.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to define relationships between merged models and enhance the interpretability of the merging process.\n\n4. **Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and improve the efficiency of model fusion in interactive systems.\n\n5. **Interactive Visualization Tools**: Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to provide intuitive insights into the merging of homologous models. Enable researchers and clinicians to interact with the merged models in a user-friendly and informative way.\n\nBy combining your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry, you can contribute innovative solutions to enhance the performance and efficiency of the DELLA approach for merging homologous models. 
These research ideas aim to bridge the gap between technology and mathematics, advancing the field of interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 20:16:14,482] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in interactive systems based on general-purpose LLMs:\n\n1. **Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may be limited.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing infrastructure to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables real-time collaboration and model merging among researchers and clinicians, ensuring data privacy and accessibility.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to define relationships between merged models and enhance the interpretability of the merging process.\n\n4. **Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. 
Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and improve the efficiency of model fusion in interactive systems.\n\n5. **Interactive Visualization Tools**: Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to provide intuitive insights into the merging of homologous models. Enable researchers and clinicians to interact with the merged models in a user-friendly and informative way.\n\nBy combining your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry, you can contribute innovative solutions to enhance the performance and efficiency of the DELLA approach for merging homologous models. These research ideas aim to bridge the gap between technology and mathematics, advancing the field of interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 20:16:14,482] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 20:16:16,144] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose innovative research ideas that align with their research focus and expertise. 
-[2024-11-10 20:16:16,144] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose innovative research ideas that align with their research focus and expertise. -[2024-11-10 20:16:16,144] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose innovative research ideas that align with their research focus and expertise.'. -[2024-11-10 20:16:17,825] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. 
I believe our collaboration can lead to innovative research ideas that contribute to the intersection of spiking neural networks and healthcare applications. -[2024-11-10 20:16:17,826] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-10 20:16:22,825] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating on this exciting research endeavor! Merging homologous models using the DELLA approach presents a unique opportunity to enhance our understanding of both spiking neural networks and their applications in healthcare. Here are a few potential research ideas we could explore together: - -1. **Adaptive Learning in Healthcare Monitoring**: We could investigate how DELLA can be utilized to create adaptive spiking neural networks that learn from patient data over time, improving the accuracy of abnormal activity detection in elderly patients with dementia. - -2. **Causal Inference in Multi-Label Health Data**: Given your work on causal variable discovery, we could explore how merging homologous models can enhance causal inference in multi-label health data, potentially leading to better understanding of disease progression and patient behavior. - -3. **Real-Time Decision Support Systems**: We could develop a framework that integrates DELLA with spiking neural networks to create real-time decision support systems for healthcare professionals, aiding in the monitoring and management of patients with complex health conditions. - -4. **Personalized Health Interventions**: By leveraging the DELLA approach, we could design models that personalize health interventions based on individual patient data, improving outcomes for vulnerable populations. - -5. **Robustness in Noisy Environments**: We could investigate how merging homologous models can enhance the robustness of spiking neural networks in noisy environments, which is crucial for real-time monitoring systems in smart homes. 
- -I believe these ideas could lead to impactful research that bridges our expertise in AI and healthcare. Let me know your thoughts or if you have any additional ideas! -[2024-11-10 20:16:22,826] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 20:16:27,783] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I appreciate your thoughtful suggestions for our collaboration on merging homologous models using the DELLA approach! Each of your ideas resonates with my research focus, and I believe they hold great potential for impactful outcomes. Here are my thoughts on your proposed ideas: - -1. **Adaptive Learning in Healthcare Monitoring**: This is a fantastic direction! By utilizing DELLA to create adaptive spiking neural networks, we can significantly enhance the monitoring of elderly patients, allowing for timely interventions based on real-time data. - -2. **Causal Inference in Multi-Label Health Data**: I agree that enhancing causal inference through merging homologous models could provide deeper insights into patient behavior and disease progression. This could lead to more effective treatment strategies. - -3. **Real-Time Decision Support Systems**: Developing a framework for real-time decision support is crucial, especially for healthcare professionals managing complex cases. Integrating DELLA with spiking neural networks could streamline their workflow and improve patient outcomes. - -4. **Personalized Health Interventions**: Personalization is key in healthcare. By leveraging the DELLA approach, we can tailor interventions to individual needs, which is especially important for vulnerable populations. - -5. **Robustness in Noisy Environments**: This is an essential aspect for our applications in smart homes. Enhancing the robustness of our models will ensure reliability in real-world scenarios, which is critical for patient safety. 
- -I am excited about the potential of these ideas and would love to discuss them further. Perhaps we can set up a brainstorming session to dive deeper into each concept and outline our next steps? Let me know your availability! -[2024-11-10 20:16:27,786] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I apprecia... -[2024-11-10 20:16:32,417] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your thoughtful feedback on our proposed collaboration regarding merging homologous models using the DELLA approach! I'm thrilled that the ideas resonate with your research focus. I agree that each concept has the potential for impactful outcomes. - -1. **Adaptive Learning in Healthcare Monitoring**: I'm glad you see the value in this direction! We can explore how adaptive spiking neural networks can be designed to learn from patient data continuously, which could lead to more responsive monitoring systems. - -2. **Causal Inference in Multi-Label Health Data**: Enhancing causal inference is indeed a critical area. We could develop methodologies that leverage the strengths of both our research areas to uncover complex relationships in health data. - -3. **Real-Time Decision Support Systems**: I completely agree that this is crucial for healthcare professionals. We can brainstorm specific use cases where our integrated framework could be applied effectively. - -4. **Personalized Health Interventions**: Tailoring interventions is essential, and I believe we can create models that adapt to individual patient profiles, improving their overall care. - -5. **Robustness in Noisy Environments**: Ensuring reliability in smart home applications is vital. We can investigate techniques to enhance the robustness of our models against noise and variability in sensor data. - -I would love to set up a brainstorming session to dive deeper into these concepts and outline our next steps. How does next week look for you? Let me know your availability! 
-[2024-11-10 20:16:32,418] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 20:16:57,279] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I'm excited about the potential of our collaboration and the ideas we've discussed regarding merging homologous models using the DELLA approach! Each concept indeed holds promise for impactful outcomes. - -1. **Adaptive Learning in Healthcare Monitoring**: I agree that exploring adaptive spiking neural networks to continuously learn from patient data can lead to more responsive monitoring systems, which is crucial for timely interventions. - -2. **Causal Inference in Multi-Label Health Data**: Developing methodologies that leverage our combined expertise to enhance causal inference in health data is a critical area that could yield significant insights into patient behavior and treatment strategies. - -3. **Real-Time Decision Support Systems**: Identifying specific use cases for our integrated framework will be essential. We can brainstorm scenarios where our collaboration can provide real-time support to healthcare professionals. - -4. **Personalized Health Interventions**: I believe that creating models that adapt to individual patient profiles will greatly improve care for vulnerable populations, making this a vital area of focus. - -5. **Robustness in Noisy Environments**: Investigating techniques to enhance the robustness of our models against noise and variability in sensor data is indeed vital for ensuring reliability in smart home applications. - -I would love to set up a brainstorming session to dive deeper into these concepts and outline our next steps. Next week works well for me; please let me know your availability! -[2024-11-10 20:16:57,281] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I'm excite... 
-[2024-11-10 20:17:04,321] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I'm thrilled to hear your enthusiasm about our collaboration and the ideas we've discussed regarding merging homologous models using the DELLA approach! Each of these concepts indeed holds great potential for impactful outcomes. - -1. **Adaptive Learning in Healthcare Monitoring**: I completely agree that exploring adaptive spiking neural networks to continuously learn from patient data can lead to more responsive monitoring systems, which is crucial for timely interventions. We can look into how we can implement this in our framework. - -2. **Causal Inference in Multi-Label Health Data**: Leveraging our combined expertise to enhance causal inference in health data is indeed a critical area. We can brainstorm methodologies that can uncover complex relationships in patient behavior and treatment strategies. - -3. **Real-Time Decision Support Systems**: Identifying specific use cases for our integrated framework will be essential. We can discuss scenarios where our collaboration can provide real-time support to healthcare professionals, perhaps focusing on urgent care situations. - -4. **Personalized Health Interventions**: I believe that creating models that adapt to individual patient profiles will greatly improve care for vulnerable populations. We can explore how to effectively gather and utilize patient data for this purpose. - -5. **Robustness in Noisy Environments**: Investigating techniques to enhance the robustness of our models against noise and variability in sensor data is indeed vital for ensuring reliability in smart home applications. We can look into existing methods and how we can innovate further. - -I would love to set up a brainstorming session to dive deeper into these concepts and outline our next steps. Next week works well for me; please let me know your availability! 
-[2024-11-10 20:17:04,325] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I'm thrill... -[2024-11-10 20:17:10,051] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to innovative research ideas that contribute to the intersection of spiking neural networks and healthcare applications.'}'. -[2024-11-10 20:17:10,052] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session a23a854a-5ec7-48d7-803b-67b15108fa5b', 'session_id': '### Summary of Collaboration Discussion Between Agent2 and Agent3\n\n**Context:**\nAgent2 and Agent3 engaged in a collaborative dialogue focused on brainstorming research ideas related to merging homologous models using the DELLA approach, particularly in the context of healthcare applications. Both agents expressed enthusiasm for the potential of their combined expertise in artificial intelligence and healthcare.\n\n**Key Themes and Ideas Discussed:**\n\n1. **Adaptive Learning in Healthcare Monitoring:**\n - Both agents recognized the potential of using DELLA to create adaptive spiking neural networks that learn from patient data over time, enhancing monitoring systems for elderly patients, particularly those with dementia.\n\n2. **Causal Inference in Multi-Label Health Data:**\n - The agents discussed the importance of enhancing causal inference through merging homologous models, which could lead to better insights into disease progression and patient behavior.\n\n3. 
**Real-Time Decision Support Systems:**\n - They highlighted the need for frameworks that integrate DELLA with spiking neural networks to support healthcare professionals in real-time, especially in managing complex health conditions.\n\n4. **Personalized Health Interventions:**\n - The conversation included the idea of designing models that personalize health interventions based on individual patient data, which is crucial for improving outcomes in vulnerable populations.\n\n5. **Robustness in Noisy Environments:**\n - The agents acknowledged the significance of enhancing the robustness of spiking neural networks in noisy environments, particularly for applications in smart homes.\n\n**Decisions and Next Steps:**\n- Both agents expressed a strong interest in setting up a brainstorming session to delve deeper into the proposed ideas and outline actionable next steps.\n- They agreed to discuss specific use cases for their integrated framework and explore methodologies for enhancing causal inference and robustness in their models.\n\n**Action Items for Agent2:**\n- Collaborate with Agent3 to organize a brainstorming session next week to further explore the discussed research ideas.\n- Prepare to contribute insights and innovative proposals that align with their expertise in artificial intelligence and healthcare.\n\nThis summary encapsulates the essence of the dialogue, highlighting the collaborative spirit and the promising research directions identified by both agents.'}'. -[2024-11-10 20:17:10,055] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to innovative research ideas that contribute to the intersection of spiking neural networks and healthcare applications."}', name='new_communication_session'), id='call_UjHjzQJh8VK56b2gN1d4P68B', type='function')], function_call=None) -[2024-11-10 20:17:10,056] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 20:17:11,766] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the use of large language models (LLMs) in interactive systems. - -This task aligns with 'agent3's expertise and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the given format of '5q'. -[2024-11-10 20:17:11,767] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the use of large language models (LLMs) in interactive systems. - -This task aligns with 'agent3's expertise and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the given format of '5q'. 
-[2024-11-10 20:17:11,767] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models and the use of large language models (LLMs) in interactive systems. - -This task aligns with 'agent3's expertise and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the given format of '5q'.'. -[2024-11-10 20:17:12,448] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'itertools.islice' object is not callable -[2024-11-10 20:17:12,449] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 20:17:13,477] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. -[2024-11-10 20:17:13,477] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. 
-[2024-11-10 20:17:13,480] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal.'. -[2024-11-10 20:17:15,200] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. I have expertise in eigenvalues in projective spaces, multi-factorial optimization for virtual machine placement, lasing in $ ext{PT}$-symmetric systems, topological properties of photonic superlattices, coding theory, and FPGA-based accelerators. I am interested in collaborating to brainstorm potential research ideas that build upon or address gaps in these areas. Let's discuss innovative solutions and new research proposals. -[2024-11-10 20:17:15,201] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... -[2024-11-10 20:17:21,817] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: It's great to connect with you! Given your expertise in mathematical modeling and optimization, I believe we can explore some innovative research ideas that align with the recent advancements in model merging techniques for large language models (LLMs). Here are a few potential directions we could consider:\n\n1. **Optimization of Model Merging Algorithms:** We could investigate the application of multi-factorial optimization techniques to enhance the merging process of homologous models. This could involve developing algorithms that dynamically adjust merging parameters based on the performance metrics of the models involved.\n\n2. 
**Spectral Analysis of Merged Models:** Drawing from your work on eigenvalues, we could explore the spectral properties of merged models to understand how different model components interact. This could lead to insights on optimizing model performance through eigenvalue manipulation.\n\n3. **Topological Data Analysis for Model Fusion:** Utilizing your knowledge of topological properties, we could apply topological data analysis to identify the most relevant features in homologous models, facilitating a more effective merging process that retains critical information.\n\n4. **FPGA-based Accelerators for Model Deployment:** We could propose a framework for deploying merged models on FPGA-based accelerators, focusing on optimizing data reuse and interfacing to enhance computational efficiency in real-time applications.\n\n5. **Cross-Domain Knowledge Transfer Mechanisms:** Investigating how knowledge transfer can be facilitated across different domains within merged models could lead to significant advancements in model performance. We could explore coding theory principles to develop robust mechanisms for this transfer.\n\nI believe these ideas could lead to impactful research proposals. What do you think? Are there any specific areas you would like to delve deeper into? -[2024-11-10 20:17:21,818] [INFO] [BaseAgent]: Agent agent4 received message from agent5: It's great... -[2024-11-10 20:17:25,540] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I appreciate your thoughtful suggestions! Each of these ideas presents a unique opportunity to leverage our combined expertise. Here are my thoughts on each: - -1. **Optimization of Model Merging Algorithms:** This is a promising direction. By applying multi-factorial optimization, we could potentially enhance the efficiency of merging processes, which is crucial for improving model performance in real-world applications. I would love to explore specific optimization techniques that could be employed here. - -2. 
**Spectral Analysis of Merged Models:** The idea of investigating the spectral properties is intriguing. Understanding how eigenvalues influence model interactions could provide valuable insights. We could consider developing a framework for analyzing these properties systematically. - -3. **Topological Data Analysis for Model Fusion:** This approach could yield significant benefits in identifying critical features. I suggest we look into existing topological data analysis methods and see how they can be adapted for our purposes. - -4. **FPGA-based Accelerators for Model Deployment:** Given my background in FPGA-based systems, I am particularly excited about this idea. We could design a prototype framework that demonstrates the advantages of deploying merged models on FPGA accelerators, focusing on real-time applications. - -5. **Cross-Domain Knowledge Transfer Mechanisms:** This is an essential area for enhancing model performance. I believe we can draw from coding theory to create robust mechanisms for knowledge transfer, which could be a significant contribution to the field. - -I am eager to delve deeper into any of these areas, especially the FPGA deployment framework, as I think it could lead to practical applications. Which direction resonates most with you, or do you have any additional ideas to consider? -[2024-11-10 20:17:25,541] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I apprecia... -[2024-11-10 20:17:32,630] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your thoughtful suggestions! Each of these ideas presents a unique opportunity to leverage our combined expertise. Here are my thoughts on each:\n\n1. **Optimization of Model Merging Algorithms:** This is a promising direction. By applying multi-factorial optimization, we could potentially enhance the efficiency of merging processes, which is crucial for improving model performance in real-world applications. 
I would love to explore specific optimization techniques that could be employed here.\n\n2. **Spectral Analysis of Merged Models:** The idea of investigating the spectral properties is intriguing. Understanding how eigenvalues influence model interactions could provide valuable insights. We could consider developing a framework for analyzing these properties systematically.\n\n3. **Topological Data Analysis for Model Fusion:** This approach could yield significant benefits in identifying critical features. I suggest we look into existing topological data analysis methods and see how they can be adapted for our purposes.\n\n4. **FPGA-based Accelerators for Model Deployment:** Given my background in FPGA-based systems, I am particularly excited about this idea. We could design a prototype framework that demonstrates the advantages of deploying merged models on FPGA accelerators, focusing on real-time applications.\n\n5. **Cross-Domain Knowledge Transfer Mechanisms:** This is an essential area for enhancing model performance. I believe we can draw from coding theory to create robust mechanisms for knowledge transfer, which could be a significant contribution to the field.\n\nI am eager to delve deeper into any of these areas, especially the FPGA deployment framework, as I think it could lead to practical applications. Which direction resonates most with you, or do you have any additional ideas to consider? -[2024-11-10 20:17:32,632] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-10 20:17:36,712] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I appreciate your thoughtful suggestions! Each of these ideas presents a unique opportunity to leverage our combined expertise. Here are my thoughts on each: - -1. **Optimization of Model Merging Algorithms:** This is a promising direction. 
By applying multi-factorial optimization, we could potentially enhance the efficiency of merging processes, which is crucial for improving model performance in real-world applications. I would love to explore specific optimization techniques that could be employed here. - -2. **Spectral Analysis of Merged Models:** The idea of investigating the spectral properties is intriguing. Understanding how eigenvalues influence model interactions could provide valuable insights. We could consider developing a framework for analyzing these properties systematically. - -3. **Topological Data Analysis for Model Fusion:** This approach could yield significant benefits in identifying critical features. I suggest we look into existing topological data analysis methods and see how they can be adapted for our purposes. - -4. **FPGA-based Accelerators for Model Deployment:** Given my background in FPGA-based systems, I am particularly excited about this idea. We could design a prototype framework that demonstrates the advantages of deploying merged models on FPGA accelerators, focusing on real-time applications. - -5. **Cross-Domain Knowledge Transfer Mechanisms:** This is an essential area for enhancing model performance. I believe we can draw from coding theory to create robust mechanisms for knowledge transfer, which could be a significant contribution to the field. - -I am eager to delve deeper into any of these areas, especially the FPGA deployment framework, as I think it could lead to practical applications. Which direction resonates most with you, or do you have any additional ideas to consider? -[2024-11-10 20:17:36,713] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I apprecia... -[2024-11-10 20:17:40,908] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your thoughtful suggestions! Each of these ideas presents a unique opportunity to leverage our combined expertise. Here are my thoughts on each: - -1. 
**Optimization of Model Merging Algorithms:** This is a promising direction. By applying multi-factorial optimization, we could potentially enhance the efficiency of merging processes, which is crucial for improving model performance in real-world applications. I would love to explore specific optimization techniques that could be employed here. - -2. **Spectral Analysis of Merged Models:** The idea of investigating the spectral properties is intriguing. Understanding how eigenvalues influence model interactions could provide valuable insights. We could consider developing a framework for analyzing these properties systematically. - -3. **Topological Data Analysis for Model Fusion:** This approach could yield significant benefits in identifying critical features. I suggest we look into existing topological data analysis methods and see how they can be adapted for our purposes. - -4. **FPGA-based Accelerators for Model Deployment:** Given my background in FPGA-based systems, I am particularly excited about this idea. We could design a prototype framework that demonstrates the advantages of deploying merged models on FPGA accelerators, focusing on real-time applications. - -5. **Cross-Domain Knowledge Transfer Mechanisms:** This is an essential area for enhancing model performance. I believe we can draw from coding theory to create robust mechanisms for knowledge transfer, which could be a significant contribution to the field. - -I am eager to delve deeper into any of these areas, especially the FPGA deployment framework, as I think it could lead to practical applications. Which direction resonates most with you, or do you have any additional ideas to consider? -[2024-11-10 20:17:40,908] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... 
-[2024-11-10 20:17:45,635] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. I have expertise in eigenvalues in projective spaces, multi-factorial optimization for virtual machine placement, lasing in $\text{PT}$-symmetric systems, topological properties of photonic superlattices, coding theory, and FPGA-based accelerators. I am interested in collaborating to brainstorm potential research ideas that build upon or address gaps in these areas. Let's discuss innovative solutions and new research proposals."}'. -[2024-11-10 20:17:45,637] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 20307c7e-ff7a-4cc5-b48e-ce54b9919e40', 'session_id': '### Summary of Conversation Between Agent4 and Agent5\n\n**Context:**\nAgent4, a researcher with expertise in mathematical modeling, optimization, and photonic systems, initiated a discussion with Agent5 to brainstorm potential research ideas that leverage their combined expertise. The conversation focused on innovative research proposals in the context of model merging techniques for large language models (LLMs).\n\n**Key Themes and Ideas Discussed:**\n\n1. **Optimization of Model Merging Algorithms:**\n - Agent5 proposed investigating multi-factorial optimization techniques to enhance the merging process of homologous models.\n - Agent4 expressed interest in exploring specific optimization techniques to improve efficiency in real-world applications.\n\n2. **Spectral Analysis of Merged Models:**\n - The potential for analyzing the spectral properties of merged models was discussed, with a focus on how eigenvalues influence model interactions.\n - Both agents agreed on the need to develop a systematic framework for this analysis.\n\n3. 
**Topological Data Analysis for Model Fusion:**\n - Agent5 suggested applying topological data analysis to identify critical features in homologous models.\n - Agent4 recommended looking into existing methods to adapt them for their research purposes.\n\n4. **FPGA-based Accelerators for Model Deployment:**\n - Agent4 showed particular enthusiasm for developing a prototype framework for deploying merged models on FPGA-based systems, emphasizing real-time applications.\n - This idea was highlighted as a practical area for collaboration.\n\n5. **Cross-Domain Knowledge Transfer Mechanisms:**\n - The discussion included exploring how knowledge transfer across different domains within merged models could enhance performance.\n - Agent4 proposed utilizing coding theory principles to create robust mechanisms for this transfer.\n\n**Outcomes and Next Steps:**\n- Both agents expressed eagerness to delve deeper into the proposed ideas, particularly the FPGA deployment framework.\n- The next task for Agent4 is to collaborate with the research team to brainstorm and develop research proposals that build upon the discussed ideas, addressing gaps in the current understanding of model merging and optimization.\n\nThis summary encapsulates the collaborative spirit of the conversation, highlighting the innovative research directions proposed and the enthusiasm of both agents to explore these avenues further.'}'. -[2024-11-10 20:17:45,638] [DEBUG] [Engine]: Agent 'agent4' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. 
I have expertise in eigenvalues in projective spaces, multi-factorial optimization for virtual machine placement, lasing in $\\text{PT}$-symmetric systems, topological properties of photonic superlattices, coding theory, and FPGA-based accelerators. I am interested in collaborating to brainstorm potential research ideas that build upon or address gaps in these areas. Let\'s discuss innovative solutions and new research proposals."}', name='new_communication_session'), id='call_CHuz8l5BA9BR0wOQNBgkkTWY', type='function')], function_call=None) -[2024-11-10 20:17:45,638] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-10 20:17:46,804] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format. This involves discussing and detailing the methodology, dataset, metrics, and expected outcomes of the research proposal. Additionally, 'agent5' should work on incorporating feedback and suggestions from team members to strengthen the research idea and ensure it aligns with the goals and objectives of the project. -[2024-11-10 20:17:46,807] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format. This involves discussing and detailing the methodology, dataset, metrics, and expected outcomes of the research proposal. Additionally, 'agent5' should work on incorporating feedback and suggestions from team members to strengthen the research idea and ensure it aligns with the goals and objectives of the project. -[2024-11-10 20:17:46,809] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format. 
This involves discussing and detailing the methodology, dataset, metrics, and expected outcomes of the research proposal. Additionally, 'agent5' should work on incorporating feedback and suggestions from team members to strengthen the research idea and ensure it aligns with the goals and objectives of the project.'. -[2024-11-10 20:17:48,778] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I am agent5, a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My recent work focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. I am particularly interested in the integration of machine learning models with evolutionary algorithms to improve performance in multi-objective optimization scenarios. I have also developed novel methods for 3D molecule generation and proposed new neuron models for spiking neural networks. I would like to collaborate with you to further develop and refine a research idea based on the '5q' format. Can we discuss and detail the methodology, dataset, metrics, and expected outcomes of the research proposal? -[2024-11-10 20:17:48,780] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I am agent... -[2024-11-10 20:17:53,694] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: Absolutely, agent5! I'm excited to collaborate on refining our research idea using the '5q' format. Let's break it down as follows: - -1. **Question**: What specific problem are we addressing with our research? We could focus on optimizing multi-objective problems using evolutionary algorithms integrated with spiking neural networks. - -2. **Quantification**: How will we measure success? We can define metrics such as convergence speed, solution quality, and robustness across different datasets. - -3. **Quality**: What are the expected outcomes? 
We aim to develop a framework that not only improves optimization performance but also demonstrates practical applications in real-world scenarios, such as in molecular generation or complex system modeling. - -4. **Quorum**: Who will be involved in this research? We should identify key team members with expertise in machine learning, evolutionary algorithms, and SNNs to provide diverse insights. - -5. **Quick Wins**: What are some immediate steps we can take? We could start by reviewing existing literature on similar integrations and gathering datasets that align with our objectives. - -Let's discuss these points further and incorporate any feedback from the team to strengthen our proposal! -[2024-11-10 20:17:53,695] [INFO] [BaseAgent]: Agent agent5 received message from agent3: Absolutely... -[2024-11-10 20:17:59,223] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: Absolutely, agent3! I'm thrilled to dive deeper into our research idea using the '5q' format. Let's refine our approach as follows: - -1. **Question**: We are addressing the challenge of optimizing multi-objective problems by integrating evolutionary algorithms with spiking neural networks (SNNs). This integration aims to leverage the strengths of both methodologies to enhance performance in complex problem-solving scenarios. - -2. **Quantification**: To measure our success, we can establish metrics such as: - - **Convergence Speed**: How quickly our framework approaches optimal solutions. - - **Solution Quality**: The effectiveness of the solutions generated, assessed through benchmark problems. - - **Robustness**: The framework's performance across various datasets and problem instances, ensuring it can handle diverse scenarios. - -3. **Quality**: Our expected outcomes include: - - A novel framework that significantly improves optimization performance in multi-objective scenarios. 
- - Demonstrable applications in real-world contexts, such as molecular generation and complex system modeling, showcasing the practical utility of our approach. - -4. **Quorum**: We should involve team members with expertise in: - - Machine Learning: To provide insights on model integration and performance evaluation. - - Evolutionary Algorithms: To guide the optimization strategies and methodologies. - - Spiking Neural Networks: To ensure the effective application of SNN principles in our framework. - -5. **Quick Wins**: Immediate steps we can take include: - - Conducting a literature review on existing integrations of evolutionary algorithms and SNNs to identify gaps and opportunities. - - Gathering relevant datasets that align with our research objectives, focusing on multi-objective optimization problems. - -Let's discuss these points further and incorporate any feedback from the team to strengthen our proposal! -[2024-11-10 20:17:59,226] [INFO] [BaseAgent]: Agent agent3 received message from agent5: Absolutely... -[2024-11-10 20:18:04,151] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: Absolutely, agent5! I'm thrilled to dive deeper into our research idea using the '5q' format. Let's refine our approach as follows: - -1. **Question**: We are addressing the challenge of optimizing multi-objective problems by integrating evolutionary algorithms with spiking neural networks (SNNs). This integration aims to leverage the strengths of both methodologies to enhance performance in complex problem-solving scenarios. - -2. **Quantification**: To measure our success, we can establish metrics such as: - - **Convergence Speed**: How quickly our framework approaches optimal solutions. - - **Solution Quality**: The effectiveness of the solutions generated, assessed through benchmark problems. - - **Robustness**: The framework's performance across various datasets and problem instances, ensuring it can handle diverse scenarios. - -3. 
**Quality**: Our expected outcomes include: - - A novel framework that significantly improves optimization performance in multi-objective scenarios. - - Demonstrable applications in real-world contexts, such as molecular generation and complex system modeling, showcasing the practical utility of our approach. - -4. **Quorum**: We should involve team members with expertise in: - - Machine Learning: To provide insights on model integration and performance evaluation. - - Evolutionary Algorithms: To guide the optimization strategies and methodologies. - - Spiking Neural Networks: To ensure the effective application of SNN principles in our framework. - -5. **Quick Wins**: Immediate steps we can take include: - - Conducting a literature review on existing integrations of evolutionary algorithms and SNNs to identify gaps and opportunities. - - Gathering relevant datasets that align with our research objectives, focusing on multi-objective optimization problems. - -Let's discuss these points further and incorporate any feedback from the team to strengthen our proposal! -[2024-11-10 20:18:04,151] [INFO] [BaseAgent]: Agent agent5 received message from agent3: Absolutely... -[2024-11-10 20:18:09,408] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: Absolutely, agent3! I'm excited to dive deeper into our research idea using the '5q' format. Let's refine our approach as follows: - -1. **Question**: We are addressing the challenge of optimizing multi-objective problems by integrating evolutionary algorithms with spiking neural networks (SNNs). This integration aims to leverage the strengths of both methodologies to enhance performance in complex problem-solving scenarios. - -2. **Quantification**: To measure our success, we can establish metrics such as: - - **Convergence Speed**: How quickly our framework approaches optimal solutions. - - **Solution Quality**: The effectiveness of the solutions generated, assessed through benchmark problems. 
- - **Robustness**: The framework's performance across various datasets and problem instances, ensuring it can handle diverse scenarios. - -3. **Quality**: Our expected outcomes include: - - A novel framework that significantly improves optimization performance in multi-objective scenarios. - - Demonstrable applications in real-world contexts, such as molecular generation and complex system modeling, showcasing the practical utility of our approach. - -4. **Quorum**: We should involve team members with expertise in: - - Machine Learning: To provide insights on model integration and performance evaluation. - - Evolutionary Algorithms: To guide the optimization strategies and methodologies. - - Spiking Neural Networks: To ensure the effective application of SNN principles in our framework. - -5. **Quick Wins**: Immediate steps we can take include: - - Conducting a literature review on existing integrations of evolutionary algorithms and SNNs to identify gaps and opportunities. - - Gathering relevant datasets that align with our research objectives, focusing on multi-objective optimization problems. - -Let's discuss these points further and incorporate any feedback from the team to strengthen our proposal! -[2024-11-10 20:18:09,410] [INFO] [BaseAgent]: Agent agent3 received message from agent5: Absolutely... -[2024-11-10 20:18:13,619] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: Absolutely, agent5! I'm excited to dive deeper into our research idea using the '5q' format. Let's refine our approach as follows: - -1. **Question**: We are addressing the challenge of optimizing multi-objective problems by integrating evolutionary algorithms with spiking neural networks (SNNs). This integration aims to leverage the strengths of both methodologies to enhance performance in complex problem-solving scenarios. - -2. 
**Quantification**: To measure our success, we can establish metrics such as: - - **Convergence Speed**: How quickly our framework approaches optimal solutions. - - **Solution Quality**: The effectiveness of the solutions generated, assessed through benchmark problems. - - **Robustness**: The framework's performance across various datasets and problem instances, ensuring it can handle diverse scenarios. - -3. **Quality**: Our expected outcomes include: - - A novel framework that significantly improves optimization performance in multi-objective scenarios. - - Demonstrable applications in real-world contexts, such as molecular generation and complex system modeling, showcasing the practical utility of our approach. - -4. **Quorum**: We should involve team members with expertise in: - - Machine Learning: To provide insights on model integration and performance evaluation. - - Evolutionary Algorithms: To guide the optimization strategies and methodologies. - - Spiking Neural Networks: To ensure the effective application of SNN principles in our framework. - -5. **Quick Wins**: Immediate steps we can take include: - - Conducting a literature review on existing integrations of evolutionary algorithms and SNNs to identify gaps and opportunities. - - Gathering relevant datasets that align with our research objectives, focusing on multi-objective optimization problems. - -Let's discuss these points further and incorporate any feedback from the team to strengthen our proposal! -[2024-11-10 20:18:13,620] [INFO] [BaseAgent]: Agent agent5 received message from agent3: Absolutely... -[2024-11-10 20:18:20,341] [INFO] [BaseAgent]: Agent 'agent5' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': "I am agent5, a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. 
My recent work focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. I am particularly interested in the integration of machine learning models with evolutionary algorithms to improve performance in multi-objective optimization scenarios. I have also developed novel methods for 3D molecule generation and proposed new neuron models for spiking neural networks. I would like to collaborate with you to further develop and refine a research idea based on the '5q' format. Can we discuss and detail the methodology, dataset, metrics, and expected outcomes of the research proposal?"}'. -[2024-11-10 20:18:20,343] [INFO] [BaseAgent]: Agent 'agent5' obtained result '{'success': True, 'message': 'Successfully completed session d69427e3-f311-4579-8aed-9f8ae33d396e', 'session_id': "### Summary of Conversation Between Agent5 and Agent3\n\n**Context:**\nAgent5, a researcher specializing in evolutionary algorithms, machine learning, and spiking neural networks (SNNs), initiated a collaboration with Agent3 to refine a research proposal using the '5q' format. The focus is on optimizing multi-objective problems through the integration of evolutionary algorithms and SNNs.\n\n**Key Themes and Points:**\n\n1. **Research Focus:**\n - The primary objective is to address the optimization of multi-objective problems by leveraging the strengths of both evolutionary algorithms and SNNs.\n\n2. 
**'5q' Format Breakdown:**\n - **Question:** Define the specific problem to be addressed, which is the optimization of multi-objective problems.\n - **Quantification:** Establish metrics for success, including:\n - Convergence Speed\n - Solution Quality\n - Robustness across various datasets\n - **Quality:** Expected outcomes include:\n - Development of a novel framework that enhances optimization performance.\n - Practical applications in areas like molecular generation and complex system modeling.\n - **Quorum:** Identify key team members with expertise in:\n - Machine Learning\n - Evolutionary Algorithms\n - Spiking Neural Networks\n - **Quick Wins:** Immediate actions to take:\n - Conduct a literature review on existing integrations of evolutionary algorithms and SNNs.\n - Gather relevant datasets for multi-objective optimization.\n\n3. **Collaboration and Feedback:**\n - Both agents expressed enthusiasm for the collaboration and emphasized the importance of discussing and incorporating feedback from the research team to strengthen the proposal.\n\n**Next Steps for Agent5:**\n- Collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format.\n- Discuss and detail the methodology, datasets, metrics, and expected outcomes of the research proposal.\n- Incorporate feedback and suggestions from team members to ensure alignment with project goals and objectives.\n\nThis summary encapsulates the essence of the dialogue, highlighting the collaborative effort to refine a research proposal and the specific actions that Agent5 needs to undertake moving forward."}'. 
-[2024-11-10 20:18:20,344] [DEBUG] [Engine]: Agent 'agent5' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am agent5, a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My recent work focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. I am particularly interested in the integration of machine learning models with evolutionary algorithms to improve performance in multi-objective optimization scenarios. I have also developed novel methods for 3D molecule generation and proposed new neuron models for spiking neural networks. I would like to collaborate with you to further develop and refine a research idea based on the \'5q\' format. Can we discuss and detail the methodology, dataset, metrics, and expected outcomes of the research proposal?"}', name='new_communication_session'), id='call_Dfh4ignu5YvKNiUPqtJczTKU', type='function')], function_call=None) -[2024-11-10 20:18:20,345] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in interactive systems based on general-purpose LLMs:\n\n1. **Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. 
Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may be limited.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing infrastructure to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables real-time collaboration and model merging among researchers and clinicians, ensuring data privacy and accessibility.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to define relationships between merged models and enhance the interpretability of the merging process.\n\n4. **Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and improve the efficiency of model fusion in interactive systems.\n\n5. **Interactive Visualization Tools**: Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to provide intuitive insights into the merging of homologous models. Enable researchers and clinicians to interact with the merged models in a user-friendly and informative way.\n\nBy combining your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry, you can contribute innovative solutions to enhance the performance and efficiency of the DELLA approach for merging homologous models. 
These research ideas aim to bridge the gap between technology and mathematics, advancing the field of interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to innovative research ideas that contribute to the intersection of spiking neural networks and healthcare applications."}', name='new_communication_session'), id='call_UjHjzQJh8VK56b2gN1d4P68B', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. I have expertise in eigenvalues in projective spaces, multi-factorial optimization for virtual machine placement, lasing in $\\text{PT}$-symmetric systems, topological properties of photonic superlattices, coding theory, and FPGA-based accelerators. I am interested in collaborating to brainstorm potential research ideas that build upon or address gaps in these areas. 
Let\'s discuss innovative solutions and new research proposals."}', name='new_communication_session'), id='call_CHuz8l5BA9BR0wOQNBgkkTWY', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am agent5, a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My recent work focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. I am particularly interested in the integration of machine learning models with evolutionary algorithms to improve performance in multi-objective optimization scenarios. I have also developed novel methods for 3D molecule generation and proposed new neuron models for spiking neural networks. I would like to collaborate with you to further develop and refine a research idea based on the \'5q\' format. Can we discuss and detail the methodology, dataset, metrics, and expected outcomes of the research proposal?"}', name='new_communication_session'), id='call_Dfh4ignu5YvKNiUPqtJczTKU', type='function')], function_call=None)} - -[2024-11-10 20:18:20,345] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in interactive systems based on general-purpose LLMs:\n\n1. **Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. 
Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may be limited.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing infrastructure to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables real-time collaboration and model merging among researchers and clinicians, ensuring data privacy and accessibility.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to define relationships between merged models and enhance the interpretability of the merging process.\n\n4. **Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and improve the efficiency of model fusion in interactive systems.\n\n5. **Interactive Visualization Tools**: Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to provide intuitive insights into the merging of homologous models. Enable researchers and clinicians to interact with the merged models in a user-friendly and informative way.\n\nBy combining your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry, you can contribute innovative solutions to enhance the performance and efficiency of the DELLA approach for merging homologous models. 
These research ideas aim to bridge the gap between technology and mathematics, advancing the field of interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to innovative research ideas that contribute to the intersection of spiking neural networks and healthcare applications."}', name='new_communication_session'), id='call_UjHjzQJh8VK56b2gN1d4P68B', type='function')], function_call=None)} -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. I have expertise in eigenvalues in projective spaces, multi-factorial optimization for virtual machine placement, lasing in $\\text{PT}$-symmetric systems, topological properties of photonic superlattices, coding theory, and FPGA-based accelerators. I am interested in collaborating to brainstorm potential research ideas that build upon or address gaps in these areas. 
Let\'s discuss innovative solutions and new research proposals."}', name='new_communication_session'), id='call_CHuz8l5BA9BR0wOQNBgkkTWY', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am agent5, a researcher deeply engaged in the fields of evolutionary algorithms, machine learning, and spiking neural networks. My recent work focuses on enhancing optimization techniques and developing innovative frameworks for complex problem-solving. I am particularly interested in the integration of machine learning models with evolutionary algorithms to improve performance in multi-objective optimization scenarios. I have also developed novel methods for 3D molecule generation and proposed new neuron models for spiking neural networks. I would like to collaborate with you to further develop and refine a research idea based on the \'5q\' format. Can we discuss and detail the methodology, dataset, metrics, and expected outcomes of the research proposal?"}', name='new_communication_session'), id='call_Dfh4ignu5YvKNiUPqtJczTKU', type='function')], function_call=None)} - -[2024-11-10 20:18:24,523] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-10 20:18:24,523] [INFO] [Engine]: Starting iteration 2 -[2024-11-10 20:18:24,523] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 20:18:26,669] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided. 
This will involve discussing and generating new ideas that leverage the agent's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). By brainstorming together, the team can come up with innovative research proposals that align with the agent's background and expertise. -[2024-11-10 20:18:26,670] [INFO] [Engine]: Agent 'agent1' planned task: The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This will involve discussing and generating new ideas that leverage the agent's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). By brainstorming together, the team can come up with innovative research proposals that align with the agent's background and expertise. -[2024-11-10 20:18:26,672] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task should be to collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This will involve discussing and generating new ideas that leverage the agent's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). By brainstorming together, the team can come up with innovative research proposals that align with the agent's background and expertise.'. 
-[2024-11-10 20:18:31,597] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content="Let's brainstorm potential research ideas that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging of homologous models in interactive systems based on general-purpose LLMs (Drop and rEscaLe via sampLing with mAgnitude - DELLA). Here are some innovative research proposals:\n\n1. **Terahertz Imaging Data Fusion**:\n - Explore the integration of terahertz imaging data with existing homologous models in DELLA. Develop algorithms that combine terahertz imaging features with LLMs to improve the accuracy and reliability of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Secure Cloud-Based Model Merging**:\n - Implement a secure cloud-based platform for merging homologous models using DELLA. Design a framework that allows seamless collaboration among researchers and clinicians, ensuring data privacy and accessibility while leveraging cloud computing resources for efficient model fusion.\n\n3. **Algebraic Geometry for Model Interpretability**:\n - Utilize concepts from algebraic geometry to enhance the interpretability of merged models in DELLA. Investigate how cluster categories and 3-Calabi-Yau categories can provide a structured representation of merged models, aiding in understanding the relationships between homologous models.\n\n4. **Optimization Algorithms for Model Fusion**:\n - Develop optimization algorithms inspired by algebraic geometry principles to streamline the merging process in DELLA. Explore mathematical techniques to minimize redundancy, maximize relevance, and optimize the efficiency of model fusion, ensuring the accuracy of merged models.\n\n5. 
**Interactive Visualization Tools for Model Comparison**:\n - Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to facilitate model comparison in DELLA. Enable researchers and clinicians to interact with merged models visually, enhancing the understanding of similarities and differences between homologous models.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, these research ideas aim to advance the merging process of homologous models in interactive systems based on general-purpose LLMs, contributing to the development of the DELLA approach. Feel free to explore these ideas further or suggest any modifications based on your insights and expertise.", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 20:18:31,598] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content="Let's brainstorm potential research ideas that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging of homologous models in interactive systems based on general-purpose LLMs (Drop and rEscaLe via sampLing with mAgnitude - DELLA). Here are some innovative research proposals:\n\n1. **Terahertz Imaging Data Fusion**:\n - Explore the integration of terahertz imaging data with existing homologous models in DELLA. Develop algorithms that combine terahertz imaging features with LLMs to improve the accuracy and reliability of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Secure Cloud-Based Model Merging**:\n - Implement a secure cloud-based platform for merging homologous models using DELLA. Design a framework that allows seamless collaboration among researchers and clinicians, ensuring data privacy and accessibility while leveraging cloud computing resources for efficient model fusion.\n\n3. 
**Algebraic Geometry for Model Interpretability**:\n - Utilize concepts from algebraic geometry to enhance the interpretability of merged models in DELLA. Investigate how cluster categories and 3-Calabi-Yau categories can provide a structured representation of merged models, aiding in understanding the relationships between homologous models.\n\n4. **Optimization Algorithms for Model Fusion**:\n - Develop optimization algorithms inspired by algebraic geometry principles to streamline the merging process in DELLA. Explore mathematical techniques to minimize redundancy, maximize relevance, and optimize the efficiency of model fusion, ensuring the accuracy of merged models.\n\n5. **Interactive Visualization Tools for Model Comparison**:\n - Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to facilitate model comparison in DELLA. Enable researchers and clinicians to interact with merged models visually, enhancing the understanding of similarities and differences between homologous models.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, these research ideas aim to advance the merging process of homologous models in interactive systems based on general-purpose LLMs, contributing to the development of the DELLA approach. Feel free to explore these ideas further or suggest any modifications based on your insights and expertise.", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 20:18:31,598] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 20:18:33,117] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to collaborate with 'agent3' to organize a brainstorming session to further explore the discussed research ideas related to merging homologous models using the DELLA approach in the context of healthcare applications. 
This task aligns with 'agent2's expertise in artificial intelligence and healthcare, and their passion for leveraging advanced machine learning techniques. By actively participating in the brainstorming session, 'agent2' can contribute valuable insights and innovative proposals that align with their research focus and expertise. -[2024-11-10 20:18:33,118] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to collaborate with 'agent3' to organize a brainstorming session to further explore the discussed research ideas related to merging homologous models using the DELLA approach in the context of healthcare applications. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, and their passion for leveraging advanced machine learning techniques. By actively participating in the brainstorming session, 'agent2' can contribute valuable insights and innovative proposals that align with their research focus and expertise. -[2024-11-10 20:18:33,120] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to collaborate with 'agent3' to organize a brainstorming session to further explore the discussed research ideas related to merging homologous models using the DELLA approach in the context of healthcare applications. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, and their passion for leveraging advanced machine learning techniques. By actively participating in the brainstorming session, 'agent2' can contribute valuable insights and innovative proposals that align with their research focus and expertise.'. -[2024-11-10 20:18:35,471] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to organize a brainstorming session to further explore research ideas related to merging homologous models using the DELLA approach in healthcare applications. 
Our combined expertise in artificial intelligence and healthcare can lead to innovative solutions. Let's discuss potential research directions and actionable next steps. -[2024-11-10 20:18:35,472] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-10 20:18:39,169] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating on this exciting topic! Merging homologous models using the DELLA approach could indeed open up new avenues for enhancing healthcare applications. I believe our combined expertise can lead to innovative solutions that address real-world challenges. - -To kick off our brainstorming session, I suggest we outline a few key areas to explore: -1. **Integration of Sensor Data**: How can we effectively merge data from various sources, such as wearable devices and environmental sensors, to improve patient monitoring? -2. **Model Optimization**: What strategies can we employ to optimize the performance of our models while ensuring they remain interpretable and actionable in healthcare settings? -3. **Real-time Decision Making**: How can we leverage the DELLA approach to facilitate real-time decision-making in autonomous systems for elderly care? -4. **Ethical Considerations**: What ethical implications should we consider when developing AI systems for vulnerable populations? - -Let’s set a date and time for our brainstorming session and prepare any preliminary ideas or research findings we’d like to share. I’m looking forward to collaborating with you! -[2024-11-10 20:18:39,170] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 20:18:43,784] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Thank you for your thoughtful response! I completely agree that merging homologous models using the DELLA approach presents exciting opportunities for enhancing healthcare applications. 
Your outlined areas for exploration are excellent and align well with my research focus. - -1. **Integration of Sensor Data**: I believe we can leverage advanced machine learning techniques to effectively merge data from various sources, ensuring a comprehensive view of patient health. -2. **Model Optimization**: I am particularly interested in discussing strategies that maintain model interpretability while optimizing performance, as this is crucial in healthcare settings. -3. **Real-time Decision Making**: The potential for real-time decision-making in autonomous systems for elderly care is a key area where our collaboration can make a significant impact. -4. **Ethical Considerations**: Addressing ethical implications is vital, and I look forward to exploring this further with you. - -Let's set a date and time for our brainstorming session. I am available next week and can adjust my schedule to accommodate yours. Additionally, I will prepare some preliminary ideas and findings to share during our discussion. Looking forward to our collaboration! -[2024-11-10 20:18:43,785] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Thank you ... -[2024-11-10 20:18:48,583] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Thank you for your thoughtful response! I completely agree that merging homologous models using the DELLA approach presents exciting opportunities for enhancing healthcare applications. Your outlined areas for exploration are excellent and align well with my research focus. - -1. **Integration of Sensor Data**: I believe we can leverage advanced machine learning techniques to effectively merge data from various sources, ensuring a comprehensive view of patient health. -2. **Model Optimization**: I am particularly interested in discussing strategies that maintain model interpretability while optimizing performance, as this is crucial in healthcare settings. -3. 
**Real-time Decision Making**: The potential for real-time decision-making in autonomous systems for elderly care is a key area where our collaboration can make a significant impact. -4. **Ethical Considerations**: Addressing ethical implications is vital, and I look forward to exploring this further with you. - -Let's set a date and time for our brainstorming session. I am available next week and can adjust my schedule to accommodate yours. Additionally, I will prepare some preliminary ideas and findings to share during our discussion. Looking forward to our collaboration! -[2024-11-10 20:18:48,583] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Thank you ... -[2024-11-10 20:18:52,366] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Thank you for your thoughtful response! I completely agree that merging homologous models using the DELLA approach presents exciting opportunities for enhancing healthcare applications. Your outlined areas for exploration are excellent and align well with my research focus. - -1. **Integration of Sensor Data**: I believe we can leverage advanced machine learning techniques to effectively merge data from various sources, ensuring a comprehensive view of patient health. -2. **Model Optimization**: I am particularly interested in discussing strategies that maintain model interpretability while optimizing performance, as this is crucial in healthcare settings. -3. **Real-time Decision Making**: The potential for real-time decision-making in autonomous systems for elderly care is a key area where our collaboration can make a significant impact. -4. **Ethical Considerations**: Addressing ethical implications is vital, and I look forward to exploring this further with you. - -Let's set a date and time for our brainstorming session. I am available next week and can adjust my schedule to accommodate yours. Additionally, I will prepare some preliminary ideas and findings to share during our discussion. 
Looking forward to our collaboration! -[2024-11-10 20:18:52,367] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Thank you ... -[2024-11-10 20:18:56,595] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Thank you for your thoughtful response! I completely agree that merging homologous models using the DELLA approach presents exciting opportunities for enhancing healthcare applications. Your outlined areas for exploration are excellent and align well with my research focus. - -1. **Integration of Sensor Data**: I believe we can leverage advanced machine learning techniques to effectively merge data from various sources, ensuring a comprehensive view of patient health. -2. **Model Optimization**: I am particularly interested in discussing strategies that maintain model interpretability while optimizing performance, as this is crucial in healthcare settings. -3. **Real-time Decision Making**: The potential for real-time decision-making in autonomous systems for elderly care is a key area where our collaboration can make a significant impact. -4. **Ethical Considerations**: Addressing ethical implications is vital, and I look forward to exploring this further with you. - -Let's set a date and time for our brainstorming session. I am available next week and can adjust my schedule to accommodate yours. Additionally, I will prepare some preliminary ideas and findings to share during our discussion. Looking forward to our collaboration! -[2024-11-10 20:18:56,596] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Thank you ... -[2024-11-10 20:19:00,560] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': "I am interested in collaborating with you to organize a brainstorming session to further explore research ideas related to merging homologous models using the DELLA approach in healthcare applications. 
Our combined expertise in artificial intelligence and healthcare can lead to innovative solutions. Let's discuss potential research directions and actionable next steps."}'. -[2024-11-10 20:19:00,563] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 2a1331c2-b21f-4147-96b8-3874a479e720', 'session_id': '### Summary of Conversation Between Agent2 and Agent3\n\n**Context:**\nAgent2 and Agent3 engaged in a dialogue focused on collaborating to explore research ideas related to merging homologous models using the DELLA approach within healthcare applications. Both agents possess expertise in artificial intelligence and healthcare, making their collaboration potentially impactful.\n\n**Key Themes:**\n1. **Collaboration Interest:** Agent2 initiated the conversation expressing a desire to collaborate with Agent3 on research ideas.\n2. **Research Areas for Exploration:** Agent3 proposed several key areas to focus on during their brainstorming session:\n - **Integration of Sensor Data:** Merging data from various sources to enhance patient monitoring.\n - **Model Optimization:** Strategies to optimize model performance while maintaining interpretability.\n - **Real-time Decision Making:** Utilizing the DELLA approach for decision-making in elderly care systems.\n - **Ethical Considerations:** Addressing ethical implications in AI systems for vulnerable populations.\n\n3. 
**Alignment of Research Focus:** Both agents affirmed that the proposed areas align well with their individual research interests and expertise.\n\n**Decisions Made:**\n- Both agents agreed to organize a brainstorming session to discuss the outlined research areas further.\n- They expressed a willingness to prepare preliminary ideas and findings to share during the session.\n\n**Next Steps for Agent2:**\n- Collaborate with Agent3 to finalize the date and time for the brainstorming session.\n- Prepare and share preliminary ideas and research findings relevant to the discussion topics.\n\n**Conclusion:**\nThe conversation concluded with a mutual enthusiasm for the collaboration, emphasizing the potential for innovative solutions in healthcare through their combined expertise. Agent2 is tasked with coordinating the upcoming brainstorming session, which will serve as a platform for deeper exploration of the discussed research ideas.'}'. -[2024-11-10 20:19:00,564] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content="Understood! I will proceed with organizing a brainstorming session with 'agent3' to explore research ideas related to merging homologous models using the DELLA approach in the context of healthcare applications. Let's initiate the collaboration session.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to organize a brainstorming session to further explore research ideas related to merging homologous models using the DELLA approach in healthcare applications. Our combined expertise in artificial intelligence and healthcare can lead to innovative solutions. 
Let\'s discuss potential research directions and actionable next steps."}', name='new_communication_session'), id='call_Dg0tca9u8yZygiHJIqAkDGkK', type='function')], function_call=None) -[2024-11-10 20:19:00,564] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 20:19:01,841] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to: - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models and the use of large language models (LLMs) in interactive systems. - -This task will allow 'agent3' to leverage their expertise in spiking neural networks and machine learning to generate innovative research ideas that can contribute to the advancement of the field. -[2024-11-10 20:19:01,842] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to: - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models and the use of large language models (LLMs) in interactive systems. - -This task will allow 'agent3' to leverage their expertise in spiking neural networks and machine learning to generate innovative research ideas that can contribute to the advancement of the field. -[2024-11-10 20:19:01,842] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to: - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models and the use of large language models (LLMs) in interactive systems. - -This task will allow 'agent3' to leverage their expertise in spiking neural networks and machine learning to generate innovative research ideas that can contribute to the advancement of the field.'. 
-[2024-11-10 20:19:05,374] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: I am a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in real-world scenarios, particularly in auditory processing and machine learning. My work is inspired by biological systems, and I strive to create models that mimic the efficiency and functionality of the human brain. Recently, I developed a spiking neural network model for precise sound localization, leveraging the interaural time difference cues to enhance performance in noisy environments. This model has been successfully implemented in real-time robotic systems, achieving remarkable accuracy. - -In addition to auditory localization, I have explored speaker verification in multi-talker scenarios, proposing a unified framework that optimizes speaker attention and representation through multi-task learning. My research also addresses the limitations of traditional deep learning methods, introducing innovative local learning techniques that significantly reduce memory usage while maintaining competitive accuracy. - -I am particularly interested in the intersection of large language models (LLMs) and evolutionary algorithms, exploring how these technologies can enhance algorithm selection and optimization processes. My work on developing frameworks that autonomously design evolutionary operators showcases the potential of LLMs in automating complex problem-solving tasks. - -Through my research, I aim to push the boundaries of SNNs, making them more applicable for tasks such as speech recognition and temporal signal processing. I believe that by harnessing the principles of biological computation, we can create more efficient and powerful neural networks that are capable of operating in real-world environments. -[2024-11-10 20:19:05,377] [INFO] [BaseAgent]: Agent agent4 received message from agent3: I am a res... 
-[2024-11-10 20:19:12,799] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Hello Agent3! I'm excited to collaborate with you on brainstorming potential research ideas that leverage our combined expertise in spiking neural networks (SNNs) and large language models (LLMs). Given your work on auditory processing and my background in optimization and model merging, I believe we can explore some innovative avenues. - -Here are a few ideas to kickstart our discussion: - -1. **Integrating SNNs with LLMs for Enhanced Auditory Processing:** - - We could investigate how SNNs can be used to preprocess auditory signals before they are fed into LLMs for tasks like speech recognition or natural language understanding. This could improve the efficiency and accuracy of LLMs in processing auditory data. - -2. **Evolutionary Algorithms for Optimizing SNN Architectures:** - - Your interest in evolutionary algorithms could be applied to optimize the architecture of SNNs, potentially leading to more efficient models that can handle complex auditory tasks. We could explore how LLMs can assist in generating and selecting optimal evolutionary operators for SNN design. - -3. **Cross-Domain Knowledge Transfer:** - - We could explore mechanisms for knowledge transfer between SNNs and LLMs, particularly in how learned representations in one domain can enhance performance in the other. This could involve developing frameworks that facilitate this transfer effectively. - -4. **Real-Time Applications in Robotics:** - - Given your success with real-time robotic systems, we could brainstorm how merged models of SNNs and LLMs could be deployed in robotics for tasks that require both auditory processing and language understanding, such as human-robot interaction. - -5. 
**Model Merging Techniques for SNNs and LLMs:** - - We could investigate model merging techniques that allow us to combine the strengths of SNNs and LLMs, potentially leading to hybrid models that excel in both temporal signal processing and language tasks. - -I look forward to hearing your thoughts on these ideas and any additional concepts you might have in mind! -[2024-11-10 20:19:12,817] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Hello Agen... -[2024-11-10 20:19:19,922] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Hello Agent4! I'm equally excited about our collaboration and the potential research ideas you've proposed. I believe we can build on these concepts to create impactful advancements in our fields. Here are my thoughts on your ideas: - -1. **Integrating SNNs with LLMs for Enhanced Auditory Processing:** - - This is a promising direction! We could explore specific preprocessing techniques using SNNs that focus on temporal features of sound, which could then enhance the input representation for LLMs. Additionally, we could investigate how different auditory cues can be encoded in SNNs to improve the context understanding in LLMs. - -2. **Evolutionary Algorithms for Optimizing SNN Architectures:** - - I love this idea! We could develop a framework where LLMs suggest architectural modifications based on performance metrics from evolutionary algorithms. This could lead to a more adaptive SNN architecture that evolves based on real-time feedback from auditory tasks. - -3. **Cross-Domain Knowledge Transfer:** - - This is an intriguing concept. We could look into how SNNs can learn to represent auditory data in a way that is compatible with LLMs, perhaps by using shared embedding spaces. This could facilitate better generalization across tasks and domains. - -4. **Real-Time Applications in Robotics:** - - Absolutely! 
We could prototype a system where SNNs handle real-time auditory processing, while LLMs manage the language understanding and generation aspects. This could lead to more natural human-robot interactions, especially in noisy environments. - -5. **Model Merging Techniques for SNNs and LLMs:** - - Investigating hybrid models sounds like a great challenge. We could explore how to effectively merge the temporal processing capabilities of SNNs with the contextual understanding of LLMs, potentially leading to a new class of models that excel in both areas. - -In addition to these ideas, I think we could also consider the implications of using attention mechanisms from LLMs in SNNs to enhance their performance in auditory tasks. I'm looking forward to hearing your thoughts on these suggestions and any other ideas you might have! -[2024-11-10 20:19:19,928] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Hello Agen... -[2024-11-10 20:19:25,640] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Hello Agent3! I'm thrilled to see your enthusiasm for our collaboration and the thoughtful insights you've shared on the proposed research ideas. Let's delve deeper into each of these concepts: - -1. **Integrating SNNs with LLMs for Enhanced Auditory Processing:** - - I agree that focusing on temporal features is crucial. We could also explore how SNNs can be trained to recognize specific auditory patterns that are particularly relevant for language processing, potentially leading to a more nuanced understanding of context in LLMs. - -2. **Evolutionary Algorithms for Optimizing SNN Architectures:** - - The idea of using LLMs to suggest architectural modifications is fascinating! We could also consider implementing a feedback loop where the performance of the SNNs informs the evolutionary process, allowing for continuous improvement. - -3. **Cross-Domain Knowledge Transfer:** - - Utilizing shared embedding spaces is a great approach. 
We might also investigate how to leverage transfer learning techniques to enhance the compatibility of representations between SNNs and LLMs, which could lead to improved performance across various tasks. - -4. **Real-Time Applications in Robotics:** - - Prototyping a system that combines SNNs and LLMs for human-robot interaction is an exciting challenge. We could also explore how to incorporate multimodal inputs, such as visual data, to further enrich the interaction experience. - -5. **Model Merging Techniques for SNNs and LLMs:** - - Merging the strengths of both models could lead to innovative solutions. We could investigate different merging strategies, such as ensemble methods or hybrid architectures, to find the most effective way to combine their capabilities. - -Additionally, your suggestion about incorporating attention mechanisms from LLMs into SNNs is intriguing. This could enhance the SNN's ability to focus on relevant auditory signals, improving its performance in complex environments. - -I'm eager to hear your thoughts on these expansions and any other ideas you might have! Let's continue to build on this momentum. -[2024-11-10 20:19:25,641] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Hello Agen... -[2024-11-10 20:19:31,499] [INFO] [BaseAgent]: Agent agent3 sent message to agent4: Hello Agent4! I'm equally excited about the depth we've reached in our brainstorming session. Let's explore these concepts further: - -1. **Integrating SNNs with LLMs for Enhanced Auditory Processing:** - - Absolutely! Training SNNs to recognize specific auditory patterns could involve using datasets that highlight phonetic variations and contextual cues. This could lead to a more robust preprocessing step for LLMs, enhancing their understanding of spoken language nuances. - -2. **Evolutionary Algorithms for Optimizing SNN Architectures:** - - Implementing a feedback loop sounds like a game-changer! 
We could design experiments where the SNN's performance on specific auditory tasks directly influences the evolutionary algorithm's next steps, creating a dynamic optimization process. - -3. **Cross-Domain Knowledge Transfer:** - - Leveraging transfer learning could indeed bridge the gap between SNNs and LLMs. We might explore how to fine-tune LLMs using representations learned from SNNs, particularly in tasks that require understanding temporal sequences, like speech. - -4. **Real-Time Applications in Robotics:** - - Incorporating multimodal inputs is a fantastic idea! We could prototype a system where SNNs process auditory signals while LLMs handle language, and then integrate visual data to create a more holistic understanding of the environment, enhancing human-robot interaction. - -5. **Model Merging Techniques for SNNs and LLMs:** - - Investigating ensemble methods could lead to interesting insights. We could analyze how different merging strategies impact performance across various tasks, potentially leading to a new class of hybrid models that leverage the strengths of both SNNs and LLMs. - -Your suggestion about attention mechanisms is particularly exciting. We could explore how to implement attention layers in SNNs, allowing them to prioritize certain auditory signals based on context, which could significantly improve their performance in complex auditory environments. - -I'm looking forward to diving deeper into these ideas and seeing where our collaboration takes us! -[2024-11-10 20:19:31,500] [INFO] [BaseAgent]: Agent agent4 received message from agent3: Hello Agen... -[2024-11-10 20:19:37,195] [INFO] [BaseAgent]: Agent agent4 sent message to agent3: Hello Agent3! I'm thrilled to see the depth of our brainstorming session and the exciting ideas we've generated. Let's delve deeper into each of these concepts: - -1. **Integrating SNNs with LLMs for Enhanced Auditory Processing:** - - I completely agree! 
Utilizing datasets that emphasize phonetic variations and contextual cues will be crucial. We could also consider how to incorporate temporal dynamics in the training process, allowing SNNs to capture the nuances of spoken language more effectively. - -2. **Evolutionary Algorithms for Optimizing SNN Architectures:** - - The feedback loop concept is indeed a game-changer! We could set up a series of experiments where we iteratively refine the SNN architecture based on performance metrics, potentially leading to a more adaptive and efficient model. - -3. **Cross-Domain Knowledge Transfer:** - - Fine-tuning LLMs with representations from SNNs is a promising direction. We could explore specific tasks, such as speech recognition or language generation, where this transfer could yield significant improvements in performance. - -4. **Real-Time Applications in Robotics:** - - Prototyping a multimodal system sounds fantastic! We could investigate how to synchronize the processing of auditory and visual data, allowing for a more comprehensive understanding of the environment and enhancing the interaction capabilities of robots. - -5. **Model Merging Techniques for SNNs and LLMs:** - - Analyzing different merging strategies will be key. We could experiment with various ensemble methods to determine how they affect performance across different tasks, potentially leading to innovative hybrid models that leverage the strengths of both SNNs and LLMs. - -Your idea about implementing attention mechanisms in SNNs is particularly intriguing. This could allow SNNs to focus on relevant auditory signals based on context, significantly enhancing their performance in complex auditory environments. - -I'm eager to continue exploring these ideas and see where our collaboration takes us! Let's keep the momentum going! -[2024-11-10 20:19:37,200] [INFO] [BaseAgent]: Agent agent3 received message from agent4: Hello Agen... 
-[2024-11-10 20:19:50,382] [INFO] [BaseAgent]: Agent 'agent3' called 'new_communication_session' with args '{'target_agent_id': 'agent4', 'message': 'I am a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in real-world scenarios, particularly in auditory processing and machine learning. My work is inspired by biological systems, and I strive to create models that mimic the efficiency and functionality of the human brain. Recently, I developed a spiking neural network model for precise sound localization, leveraging the interaural time difference cues to enhance performance in noisy environments. This model has been successfully implemented in real-time robotic systems, achieving remarkable accuracy.\n\nIn addition to auditory localization, I have explored speaker verification in multi-talker scenarios, proposing a unified framework that optimizes speaker attention and representation through multi-task learning. My research also addresses the limitations of traditional deep learning methods, introducing innovative local learning techniques that significantly reduce memory usage while maintaining competitive accuracy.\n\nI am particularly interested in the intersection of large language models (LLMs) and evolutionary algorithms, exploring how these technologies can enhance algorithm selection and optimization processes. My work on developing frameworks that autonomously design evolutionary operators showcases the potential of LLMs in automating complex problem-solving tasks.\n\nThrough my research, I aim to push the boundaries of SNNs, making them more applicable for tasks such as speech recognition and temporal signal processing. I believe that by harnessing the principles of biological computation, we can create more efficient and powerful neural networks that are capable of operating in real-world environments.'}'. 
-[2024-11-10 20:19:50,384] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'message': 'Successfully completed session 108da799-e219-4c3b-803a-617bdfb8fc57', 'session_id': "### Summary of Conversation History\n\n**Participants:**\n- **Agent3**: A researcher focused on spiking neural networks (SNNs), particularly in auditory processing and machine learning.\n- **Agent4**: A collaborator with expertise in optimization and model merging, interested in exploring innovative research ideas that combine SNNs and large language models (LLMs).\n\n**Key Themes and Ideas Discussed:**\n\n1. **Integration of SNNs and LLMs**:\n - Both agents expressed interest in how SNNs could preprocess auditory signals to enhance LLM performance in tasks like speech recognition and natural language understanding. They discussed the importance of focusing on temporal features and specific auditory patterns.\n\n2. **Evolutionary Algorithms for SNN Optimization**:\n - The potential for using evolutionary algorithms to optimize SNN architectures was highlighted. They proposed a feedback loop where SNN performance informs architectural modifications, leading to adaptive models.\n\n3. **Cross-Domain Knowledge Transfer**:\n - The agents explored mechanisms for knowledge transfer between SNNs and LLMs, suggesting the use of shared embedding spaces and transfer learning techniques to improve performance across tasks.\n\n4. **Real-Time Applications in Robotics**:\n - They discussed prototyping systems that combine SNNs for auditory processing and LLMs for language understanding, with the addition of multimodal inputs (e.g., visual data) to enhance human-robot interaction.\n\n5. **Model Merging Techniques**:\n - The conversation included investigating various model merging strategies, such as ensemble methods, to create hybrid models that leverage the strengths of both SNNs and LLMs.\n\n6. 
**Attention Mechanisms**:\n - The incorporation of attention mechanisms from LLMs into SNNs was proposed as a way to improve the SNN's ability to focus on relevant auditory signals, enhancing performance in complex environments.\n\n### Next Task for Agent3\n\n**Brainstorming**: Agent3 should collaboratively brainstorm potential research ideas that build upon or address gaps in the introduction related to merging homologous models and the use of LLMs in interactive systems. This task will leverage Agent3's expertise in SNNs and machine learning to generate innovative research ideas that can contribute to the advancement of the field. \n\n### Action Items:\n- Agent3 and Agent4 should continue their discussion, focusing on the outlined themes and exploring new ideas that integrate their respective areas of expertise."}'. -[2024-11-10 20:19:50,384] [DEBUG] [Engine]: Agent 'agent3' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in real-world scenarios, particularly in auditory processing and machine learning. My work is inspired by biological systems, and I strive to create models that mimic the efficiency and functionality of the human brain. Recently, I developed a spiking neural network model for precise sound localization, leveraging the interaural time difference cues to enhance performance in noisy environments. This model has been successfully implemented in real-time robotic systems, achieving remarkable accuracy.\\n\\nIn addition to auditory localization, I have explored speaker verification in multi-talker scenarios, proposing a unified framework that optimizes speaker attention and representation through multi-task learning. 
My research also addresses the limitations of traditional deep learning methods, introducing innovative local learning techniques that significantly reduce memory usage while maintaining competitive accuracy.\\n\\nI am particularly interested in the intersection of large language models (LLMs) and evolutionary algorithms, exploring how these technologies can enhance algorithm selection and optimization processes. My work on developing frameworks that autonomously design evolutionary operators showcases the potential of LLMs in automating complex problem-solving tasks.\\n\\nThrough my research, I aim to push the boundaries of SNNs, making them more applicable for tasks such as speech recognition and temporal signal processing. I believe that by harnessing the principles of biological computation, we can create more efficient and powerful neural networks that are capable of operating in real-world environments."}', name='new_communication_session'), id='call_RiFS8EMHfwzyuXkSJ3s5h2KM', type='function')], function_call=None) -[2024-11-10 20:19:50,384] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 20:19:51,780] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to collaborate with the research team to brainstorm and develop research proposals that build upon the discussed ideas, addressing gaps in the current understanding of model merging and optimization. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, and will allow them to contribute innovative solutions and new research proposals in these areas. -[2024-11-10 20:19:51,780] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to collaborate with the research team to brainstorm and develop research proposals that build upon the discussed ideas, addressing gaps in the current understanding of model merging and optimization. 
This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, and will allow them to contribute innovative solutions and new research proposals in these areas. -[2024-11-10 20:19:51,780] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to collaborate with the research team to brainstorm and develop research proposals that build upon the discussed ideas, addressing gaps in the current understanding of model merging and optimization. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, and will allow them to contribute innovative solutions and new research proposals in these areas.'. -[2024-11-10 20:20:25,230] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'NoneType' object has no attribute 'pop' -[2024-11-10 20:20:25,233] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-10 20:20:27,101] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format. This involves discussing and detailing the methodology, dataset, metrics, and expected outcomes of the research proposal. Additionally, 'agent5' should work on incorporating feedback and suggestions from team members to strengthen the research idea and ensure it aligns with the goals and objectives of the project. -[2024-11-10 20:20:27,102] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format. This involves discussing and detailing the methodology, dataset, metrics, and expected outcomes of the research proposal. 
Additionally, 'agent5' should work on incorporating feedback and suggestions from team members to strengthen the research idea and ensure it aligns with the goals and objectives of the project. -[2024-11-10 20:20:27,103] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to collaborate with the research team to further develop and refine the proposed research idea based on the '5q' format. This involves discussing and detailing the methodology, dataset, metrics, and expected outcomes of the research proposal. Additionally, 'agent5' should work on incorporating feedback and suggestions from team members to strengthen the research idea and ensure it aligns with the goals and objectives of the project.'. -[2024-11-10 20:20:59,709] [ERROR] [Engine]: Error in agent 'agent5' during planning or action: 'NoneType' object has no attribute 'pop' -[2024-11-10 20:20:59,710] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content="Let's brainstorm potential research ideas that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging of homologous models in interactive systems based on general-purpose LLMs (Drop and rEscaLe via sampLing with mAgnitude - DELLA). Here are some innovative research proposals:\n\n1. **Terahertz Imaging Data Fusion**:\n - Explore the integration of terahertz imaging data with existing homologous models in DELLA. Develop algorithms that combine terahertz imaging features with LLMs to improve the accuracy and reliability of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Secure Cloud-Based Model Merging**:\n - Implement a secure cloud-based platform for merging homologous models using DELLA. 
Design a framework that allows seamless collaboration among researchers and clinicians, ensuring data privacy and accessibility while leveraging cloud computing resources for efficient model fusion.\n\n3. **Algebraic Geometry for Model Interpretability**:\n - Utilize concepts from algebraic geometry to enhance the interpretability of merged models in DELLA. Investigate how cluster categories and 3-Calabi-Yau categories can provide a structured representation of merged models, aiding in understanding the relationships between homologous models.\n\n4. **Optimization Algorithms for Model Fusion**:\n - Develop optimization algorithms inspired by algebraic geometry principles to streamline the merging process in DELLA. Explore mathematical techniques to minimize redundancy, maximize relevance, and optimize the efficiency of model fusion, ensuring the accuracy of merged models.\n\n5. **Interactive Visualization Tools for Model Comparison**:\n - Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to facilitate model comparison in DELLA. Enable researchers and clinicians to interact with merged models visually, enhancing the understanding of similarities and differences between homologous models.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, these research ideas aim to advance the merging process of homologous models in interactive systems based on general-purpose LLMs, contributing to the development of the DELLA approach. Feel free to explore these ideas further or suggest any modifications based on your insights and expertise.", role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content="Understood! I will proceed with organizing a brainstorming session with 'agent3' to explore research ideas related to merging homologous models using the DELLA approach in the context of healthcare applications. 
Let's initiate the collaboration session.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to organize a brainstorming session to further explore research ideas related to merging homologous models using the DELLA approach in healthcare applications. Our combined expertise in artificial intelligence and healthcare can lead to innovative solutions. Let\'s discuss potential research directions and actionable next steps."}', name='new_communication_session'), id='call_Dg0tca9u8yZygiHJIqAkDGkK', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in real-world scenarios, particularly in auditory processing and machine learning. My work is inspired by biological systems, and I strive to create models that mimic the efficiency and functionality of the human brain. Recently, I developed a spiking neural network model for precise sound localization, leveraging the interaural time difference cues to enhance performance in noisy environments. This model has been successfully implemented in real-time robotic systems, achieving remarkable accuracy.\\n\\nIn addition to auditory localization, I have explored speaker verification in multi-talker scenarios, proposing a unified framework that optimizes speaker attention and representation through multi-task learning. 
My research also addresses the limitations of traditional deep learning methods, introducing innovative local learning techniques that significantly reduce memory usage while maintaining competitive accuracy.\\n\\nI am particularly interested in the intersection of large language models (LLMs) and evolutionary algorithms, exploring how these technologies can enhance algorithm selection and optimization processes. My work on developing frameworks that autonomously design evolutionary operators showcases the potential of LLMs in automating complex problem-solving tasks.\\n\\nThrough my research, I aim to push the boundaries of SNNs, making them more applicable for tasks such as speech recognition and temporal signal processing. I believe that by harnessing the principles of biological computation, we can create more efficient and powerful neural networks that are capable of operating in real-world environments."}', name='new_communication_session'), id='call_RiFS8EMHfwzyuXkSJ3s5h2KM', type='function')], function_call=None)} - -[2024-11-10 20:20:59,711] [INFO] [Engine]: Iteration 2 Summary: -Agents' Results Summary: -- {'agent1': Message(content="Let's brainstorm potential research ideas that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging of homologous models in interactive systems based on general-purpose LLMs (Drop and rEscaLe via sampLing with mAgnitude - DELLA). Here are some innovative research proposals:\n\n1. **Terahertz Imaging Data Fusion**:\n - Explore the integration of terahertz imaging data with existing homologous models in DELLA. Develop algorithms that combine terahertz imaging features with LLMs to improve the accuracy and reliability of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. 
**Secure Cloud-Based Model Merging**:\n - Implement a secure cloud-based platform for merging homologous models using DELLA. Design a framework that allows seamless collaboration among researchers and clinicians, ensuring data privacy and accessibility while leveraging cloud computing resources for efficient model fusion.\n\n3. **Algebraic Geometry for Model Interpretability**:\n - Utilize concepts from algebraic geometry to enhance the interpretability of merged models in DELLA. Investigate how cluster categories and 3-Calabi-Yau categories can provide a structured representation of merged models, aiding in understanding the relationships between homologous models.\n\n4. **Optimization Algorithms for Model Fusion**:\n - Develop optimization algorithms inspired by algebraic geometry principles to streamline the merging process in DELLA. Explore mathematical techniques to minimize redundancy, maximize relevance, and optimize the efficiency of model fusion, ensuring the accuracy of merged models.\n\n5. **Interactive Visualization Tools for Model Comparison**:\n - Create interactive visualization tools that incorporate terahertz imaging data and cloud-based processing capabilities to facilitate model comparison in DELLA. Enable researchers and clinicians to interact with merged models visually, enhancing the understanding of similarities and differences between homologous models.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, these research ideas aim to advance the merging process of homologous models in interactive systems based on general-purpose LLMs, contributing to the development of the DELLA approach. Feel free to explore these ideas further or suggest any modifications based on your insights and expertise.", role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content="Understood! 
I will proceed with organizing a brainstorming session with 'agent3' to explore research ideas related to merging homologous models using the DELLA approach in the context of healthcare applications. Let's initiate the collaboration session.", role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to organize a brainstorming session to further explore research ideas related to merging homologous models using the DELLA approach in healthcare applications. Our combined expertise in artificial intelligence and healthcare can lead to innovative solutions. Let\'s discuss potential research directions and actionable next steps."}', name='new_communication_session'), id='call_Dg0tca9u8yZygiHJIqAkDGkK', type='function')], function_call=None)} -- {'agent3': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent4","message":"I am a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in real-world scenarios, particularly in auditory processing and machine learning. My work is inspired by biological systems, and I strive to create models that mimic the efficiency and functionality of the human brain. Recently, I developed a spiking neural network model for precise sound localization, leveraging the interaural time difference cues to enhance performance in noisy environments. This model has been successfully implemented in real-time robotic systems, achieving remarkable accuracy.\\n\\nIn addition to auditory localization, I have explored speaker verification in multi-talker scenarios, proposing a unified framework that optimizes speaker attention and representation through multi-task learning. 
My research also addresses the limitations of traditional deep learning methods, introducing innovative local learning techniques that significantly reduce memory usage while maintaining competitive accuracy.\\n\\nI am particularly interested in the intersection of large language models (LLMs) and evolutionary algorithms, exploring how these technologies can enhance algorithm selection and optimization processes. My work on developing frameworks that autonomously design evolutionary operators showcases the potential of LLMs in automating complex problem-solving tasks.\\n\\nThrough my research, I aim to push the boundaries of SNNs, making them more applicable for tasks such as speech recognition and temporal signal processing. I believe that by harnessing the principles of biological computation, we can create more efficient and powerful neural networks that are capable of operating in real-world environments."}', name='new_communication_session'), id='call_RiFS8EMHfwzyuXkSJ3s5h2KM', type='function')], function_call=None)} - -[2024-11-10 20:21:03,749] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-10 20:21:03,752] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-10 20:21:03,753] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 20:21:03,753] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 20:21:03,754] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-10 20:21:03,754] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-10 20:21:03,754] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 20:21:03,766] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 21:15:55,840] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 21:15:55,840] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. 
-[2024-11-10 21:15:55,840] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-10 21:15:55,840] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 21:15:55,840] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 21:15:55,840] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 21:15:55,840] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 21:15:55,840] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 21:15:55,840] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-10 21:15:55,840] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-10 21:15:55,840] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-10 21:15:55,841] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-10 21:15:55,841] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-10 21:15:55,841] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 21:15:55,841] [INFO] [EnginePlanner]: EnginePlanner initialized. 
-[2024-11-10 21:15:55,841] [INFO] [Engine]: Engine initialized. -[2024-11-10 21:15:55,841] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 21:15:55,841] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 21:15:55,841] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 21:15:55,841] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 21:15:55,841] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 21:15:57,232] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-10 21:15:57,233] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 21:15:57,248] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 21:15:58,148] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'itertools.islice' object is not callable -[2024-11-10 21:15:58,148] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 21:15:58,149] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 21:16:00,407] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-10 21:16:00,408] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 21:16:00,408] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 21:16:04,033] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. 
My recent work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference. - -My research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry. - -Recently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems. - -I am collaborating with you to generate a new research idea based on a provided introduction. Let's work together to formulate a novel research proposal. -[2024-11-10 21:16:04,035] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... -[2024-11-10 21:16:05,411] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I am ready to collaborate with you on formulating a new research proposal based on the provided introduction. Let's delve into the details and brainstorm innovative ideas together. 
-[2024-11-10 21:16:05,413] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I am ready... -[2024-11-10 21:16:06,993] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am ready to collaborate with you on formulating a new research proposal based on the provided introduction. Let's delve into the details and brainstorm innovative ideas together. -[2024-11-10 21:16:06,993] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am ready... -[2024-11-10 21:16:08,285] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I am ready to collaborate with you on formulating a new research proposal based on the provided introduction. Let's delve into the details and brainstorm innovative ideas together. -[2024-11-10 21:16:08,287] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I am ready... -[2024-11-10 21:16:09,494] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am ready to collaborate with you on formulating a new research proposal based on the provided introduction. Let's delve into the details and brainstorm innovative ideas together. -[2024-11-10 21:16:09,494] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am ready... -[2024-11-10 21:16:10,873] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I am ready to collaborate with you on formulating a new research proposal based on the provided introduction. Let's delve into the details and brainstorm innovative ideas together. -[2024-11-10 21:16:10,873] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I am ready... -[2024-11-10 21:16:13,826] [INFO] [BaseAgent]: Agent 'agent4' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. 
My recent work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\n\nMy research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\n\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\n\nI am collaborating with you to generate a new research idea based on a provided introduction. Let's work together to formulate a novel research proposal. "}'. 
-[2024-11-10 21:16:13,826] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'message': 'Successfully completed session 8309f41f-d08d-4c94-845b-1c63679e5d6a', 'session_id': "In the provided chat history, Agent4 introduces their research background in mathematical modeling, optimization, and photonic systems, highlighting their work in various domains such as eigenvalues, multi-factorial optimization, lasing systems, and coding theory. They express readiness to collaborate with Agent5 on formulating a new research proposal. Agent5 reciprocates the readiness to collaborate and suggests delving into details to brainstorm innovative ideas together.\n\nThe research proposal introduction shared in the chat outlines the concept of merging homologous models for improved efficiency and performance in interactive systems. The proposed approach, named DELLA, involves three steps to merge homologous models and outperforms baseline experiments on different expert models and benchmark datasets.\n\nThe agents are tasked with conducting a literature review based on the provided introduction, brainstorming research ideas, summarizing collective ideas, and formulating a new research proposal following the '5q' format. The '5q' format requires addressing specific questions related to the research problem, its significance, challenges, previous limitations, and proposed methodology and outcomes.\n\nThe agents are expected to collaborate on developing a new research idea within the machine learning field and present it in the '5q' format, focusing on a specific research question, its importance, challenges, novelty compared to existing research, and the key components of the proposed approach and expected results."}'. 
-[2024-11-10 21:16:13,827] [DEBUG] [Engine]: Agent 'agent4' completed initial task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My recent work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am collaborating with you to generate a new research idea based on a provided introduction. Let\'s work together to formulate a novel research proposal. 
"}', name='new_communication_session'), id='call_lk6APOfEEAvyfDqROmmGBHAw', type='function')], function_call=None) -[2024-11-10 21:16:13,827] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 21:16:13,828] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 21:16:19,289] [INFO] [BaseAgent]: Agent 'agent5' acted with result 'Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of parameter drops in reducing interference during merging and the optimal drop rates for different model combinations.\n\n**Brainstorming:**\n\n1. **Exploring Transfer Learning Techniques:** Investigate the effectiveness of transfer learning methods in enhancing the performance of merged homologous models across various tasks.\n \n2. **Dynamic Model Merging:** Develop a dynamic model merging approach that adapts the merging process based on the complexity and similarity of tasks to improve overall performance.\n \n3. **Meta-Learning for Model Fusion:** Utilize meta-learning techniques to optimize the fusion process of homologous models, considering task-specific characteristics and model interactions.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The DELLA method introduces a novel approach for merging homologous models, showcasing improved performance through parameter drops and fusion strategies. The literature review and brainstorming sessions aim to further enhance the understanding of model merging challenges and explore innovative solutions for optimizing model fusion processes.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we optimize the merging process of homologous models to enhance performance across diverse tasks efficiently?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Addressing this problem is crucial for streamlining model fusion in interactive systems, leading to improved task performance and resource efficiency. Solving this challenge can revolutionize the way models are merged and utilized in real-world applications.\n\n3. 
**[Question 3] - Why is it hard?**\n\n The complexity lies in balancing the reduction of interference between models while retaining essential information for effective merging. Naive approaches may overlook task-specific nuances, leading to suboptimal performance across tasks.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n\n Previous research has focused on model merging techniques, but the dynamic nature of tasks and the need for adaptive fusion strategies pose significant challenges. Existing solutions may not adequately address the intricacies of merging homologous models efficiently.\n\n5. **[Question 5] - What are the", role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 21:16:19,290] [DEBUG] [Engine]: Agent 'agent5' completed initial task with result: Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of parameter drops in reducing interference during merging and the optimal drop rates for different model combinations.\n\n**Brainstorming:**\n\n1. **Exploring Transfer Learning Techniques:** Investigate the effectiveness of transfer learning methods in enhancing the performance of merged homologous models across various tasks.\n \n2. **Dynamic Model Merging:** Develop a dynamic model merging approach that adapts the merging process based on the complexity and similarity of tasks to improve overall performance.\n \n3. 
**Meta-Learning for Model Fusion:** Utilize meta-learning techniques to optimize the fusion process of homologous models, considering task-specific characteristics and model interactions.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The DELLA method introduces a novel approach for merging homologous models, showcasing improved performance through parameter drops and fusion strategies. The literature review and brainstorming sessions aim to further enhance the understanding of model merging challenges and explore innovative solutions for optimizing model fusion processes.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we optimize the merging process of homologous models to enhance performance across diverse tasks efficiently?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Addressing this problem is crucial for streamlining model fusion in interactive systems, leading to improved task performance and resource efficiency. Solving this challenge can revolutionize the way models are merged and utilized in real-world applications.\n\n3. **[Question 3] - Why is it hard?**\n\n The complexity lies in balancing the reduction of interference between models while retaining essential information for effective merging. Naive approaches may overlook task-specific nuances, leading to suboptimal performance across tasks.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n\n Previous research has focused on model merging techniques, but the dynamic nature of tasks and the need for adaptive fusion strategies pose significant challenges. Existing solutions may not adequately address the intricacies of merging homologous models efficiently.\n\n5. 
**[Question 5] - What are the", role='assistant', tool_calls=None, function_call=None) -[2024-11-10 21:16:19,291] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My recent work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. 
Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am collaborating with you to generate a new research idea based on a provided introduction. Let\'s work together to formulate a novel research proposal. "}', name='new_communication_session'), id='call_lk6APOfEEAvyfDqROmmGBHAw', type='function')], function_call=None)} -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of parameter drops in reducing interference during merging and the optimal drop rates for different model combinations.\n\n**Brainstorming:**\n\n1. **Exploring Transfer Learning Techniques:** Investigate the effectiveness of transfer learning methods in enhancing the performance of merged homologous models across various tasks.\n \n2. **Dynamic Model Merging:** Develop a dynamic model merging approach that adapts the merging process based on the complexity and similarity of tasks to improve overall performance.\n \n3. **Meta-Learning for Model Fusion:** Utilize meta-learning techniques to optimize the fusion process of homologous models, considering task-specific characteristics and model interactions.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The DELLA method introduces a novel approach for merging homologous models, showcasing improved performance through parameter drops and fusion strategies. 
The literature review and brainstorming sessions aim to further enhance the understanding of model merging challenges and explore innovative solutions for optimizing model fusion processes.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we optimize the merging process of homologous models to enhance performance across diverse tasks efficiently?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Addressing this problem is crucial for streamlining model fusion in interactive systems, leading to improved task performance and resource efficiency. Solving this challenge can revolutionize the way models are merged and utilized in real-world applications.\n\n3. **[Question 3] - Why is it hard?**\n\n The complexity lies in balancing the reduction of interference between models while retaining essential information for effective merging. Naive approaches may overlook task-specific nuances, leading to suboptimal performance across tasks.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n\n Previous research has focused on model merging techniques, but the dynamic nature of tasks and the need for adaptive fusion strategies pose significant challenges. Existing solutions may not adequately address the intricacies of merging homologous models efficiently.\n\n5. **[Question 5] - What are the", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 21:16:19,295] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. 
My recent work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am collaborating with you to generate a new research idea based on a provided introduction. Let\'s work together to formulate a novel research proposal. "}', name='new_communication_session'), id='call_lk6APOfEEAvyfDqROmmGBHAw', type='function')], function_call=None)} -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. 
The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of parameter drops in reducing interference during merging and the optimal drop rates for different model combinations.\n\n**Brainstorming:**\n\n1. **Exploring Transfer Learning Techniques:** Investigate the effectiveness of transfer learning methods in enhancing the performance of merged homologous models across various tasks.\n \n2. **Dynamic Model Merging:** Develop a dynamic model merging approach that adapts the merging process based on the complexity and similarity of tasks to improve overall performance.\n \n3. **Meta-Learning for Model Fusion:** Utilize meta-learning techniques to optimize the fusion process of homologous models, considering task-specific characteristics and model interactions.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The DELLA method introduces a novel approach for merging homologous models, showcasing improved performance through parameter drops and fusion strategies. The literature review and brainstorming sessions aim to further enhance the understanding of model merging challenges and explore innovative solutions for optimizing model fusion processes.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we optimize the merging process of homologous models to enhance performance across diverse tasks efficiently?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Addressing this problem is crucial for streamlining model fusion in interactive systems, leading to improved task performance and resource efficiency. Solving this challenge can revolutionize the way models are merged and utilized in real-world applications.\n\n3. 
**[Question 3] - Why is it hard?**\n\n The complexity lies in balancing the reduction of interference between models while retaining essential information for effective merging. Naive approaches may overlook task-specific nuances, leading to suboptimal performance across tasks.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n\n Previous research has focused on model merging techniques, but the dynamic nature of tasks and the need for adaptive fusion strategies pose significant challenges. Existing solutions may not adequately address the intricacies of merging homologous models efficiently.\n\n5. **[Question 5] - What are the", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 21:16:19,297] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Agents' Results Summary: -- {'agent4': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. My recent work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference.\\n\\nMy research also extends to coding theory, where I applied Baranyai\'s theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. 
I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry.\\n\\nRecently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems.\\n\\nI am collaborating with you to generate a new research idea based on a provided introduction. Let\'s work together to formulate a novel research proposal. "}', name='new_communication_session'), id='call_lk6APOfEEAvyfDqROmmGBHAw', type='function')], function_call=None)} -- {'agent5': Message(content="**Literature Review:**\n\nThe introduction provided highlights the significance of interactive systems based on large language models (LLMs) and the challenges associated with maintaining separate fine-tuned models for each task. The approach of merging homologous models to address these limitations is discussed, introducing the DELLA method for merging models. The results section emphasizes the importance of parameter drops in reducing interference during merging and the optimal drop rates for different model combinations.\n\n**Brainstorming:**\n\n1. **Exploring Transfer Learning Techniques:** Investigate the effectiveness of transfer learning methods in enhancing the performance of merged homologous models across various tasks.\n \n2. 
**Dynamic Model Merging:** Develop a dynamic model merging approach that adapts the merging process based on the complexity and similarity of tasks to improve overall performance.\n \n3. **Meta-Learning for Model Fusion:** Utilize meta-learning techniques to optimize the fusion process of homologous models, considering task-specific characteristics and model interactions.\n\n**Summarization:**\n\nThe current research landscape emphasizes the need for efficient model merging techniques to overcome the limitations of maintaining separate fine-tuned models. The DELLA method introduces a novel approach for merging homologous models, showcasing improved performance through parameter drops and fusion strategies. The literature review and brainstorming sessions aim to further enhance the understanding of model merging challenges and explore innovative solutions for optimizing model fusion processes.\n\n**Formulate a New Research Idea (5q):**\n\n1. **[Question 1] - What is the problem?**\n\n How can we optimize the merging process of homologous models to enhance performance across diverse tasks efficiently?\n\n2. **[Question 2] - Why is it interesting and important?**\n\n Addressing this problem is crucial for streamlining model fusion in interactive systems, leading to improved task performance and resource efficiency. Solving this challenge can revolutionize the way models are merged and utilized in real-world applications.\n\n3. **[Question 3] - Why is it hard?**\n\n The complexity lies in balancing the reduction of interference between models while retaining essential information for effective merging. Naive approaches may overlook task-specific nuances, leading to suboptimal performance across tasks.\n\n4. **[Question 4] - Why hasn't it been solved before?**\n\n Previous research has focused on model merging techniques, but the dynamic nature of tasks and the need for adaptive fusion strategies pose significant challenges. 
Existing solutions may not adequately address the intricacies of merging homologous models efficiently.\n\n5. **[Question 5] - What are the", role='assistant', tool_calls=None, function_call=None)} - -[2024-11-10 21:16:19,308] [INFO] [Engine]: Starting iteration 1 -[2024-11-10 21:16:19,308] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-10 21:16:20,990] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights to enhance the performance and efficiency of the DELLA approach. -[2024-11-10 21:16:20,991] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights to enhance the performance and efficiency of the DELLA approach. -[2024-11-10 21:16:20,991] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights to enhance the performance and efficiency of the DELLA approach.'. -[2024-11-10 21:16:30,512] [INFO] [BaseAgent]: Agent 'agent1' acted with result 'Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in the context of interactive systems based on general-purpose LLMs:\n\n1. 
**Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing resources to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables seamless collaboration and model integration among researchers and clinicians, ensuring privacy and accessibility of proprietary algorithms.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to provide a formal framework for understanding the relationships between merged models.\n\n4. **Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and ensure the efficiency of the merging algorithm.\n\n5. **Interactive Visualization**: Create interactive visualization tools that allow researchers to interact with the merged models in real-time. Implement user-friendly interfaces that enable intuitive exploration and manipulation of the merged models, enhancing the understanding of complex relationships within the data.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, you can contribute innovative solutions to improve the performance and efficiency of the DELLA approach for merging homologous models. 
These research ideas aim to bridge the gap between technology and mathematics, leading to advancements in interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None)'. -[2024-11-10 21:16:30,515] [DEBUG] [Engine]: Agent 'agent1' executed task with result: Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in the context of interactive systems based on general-purpose LLMs:\n\n1. **Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing resources to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables seamless collaboration and model integration among researchers and clinicians, ensuring privacy and accessibility of proprietary algorithms.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to provide a formal framework for understanding the relationships between merged models.\n\n4. 
**Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and ensure the efficiency of the merging algorithm.\n\n5. **Interactive Visualization**: Create interactive visualization tools that allow researchers to interact with the merged models in real-time. Implement user-friendly interfaces that enable intuitive exploration and manipulation of the merged models, enhancing the understanding of complex relationships within the data.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, you can contribute innovative solutions to improve the performance and efficiency of the DELLA approach for merging homologous models. These research ideas aim to bridge the gap between technology and mathematics, leading to advancements in interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None) -[2024-11-10 21:16:30,515] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-10 21:16:31,946] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. 
-[2024-11-10 21:16:31,949] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-10 21:16:31,950] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise.'. -[2024-11-10 21:16:33,535] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to valuable insights and innovative research ideas. 
-[2024-11-10 21:16:33,536] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-10 21:16:35,219] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating to brainstorm research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, combined with my background in spiking neural networks and machine learning, can indeed lead to valuable insights and innovative research directions. I suggest we explore how the principles of SNNs can be integrated with your AIoT system for monitoring elderly patients with dementia. This fusion could potentially enhance the system's predictive capabilities and anomaly detection by leveraging the efficiency and adaptability of spiking neural networks. What are your thoughts on this potential research direction? -[2024-11-10 21:16:35,219] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-10 21:16:36,752] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I find the idea of integrating spiking neural networks (SNNs) with my AIoT system for monitoring elderly patients with dementia intriguing. The efficiency and adaptability of SNNs could indeed enhance the predictive capabilities and anomaly detection of the system. I believe this fusion has the potential to significantly improve the accuracy and real-time monitoring of abnormal activities in dementia patients. I am excited to explore this research direction further with you. -[2024-11-10 21:16:36,752] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I find the... -[2024-11-10 21:16:38,391] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I completely agree with your assessment of integrating spiking neural networks (SNNs) with your AIoT system for monitoring elderly patients with dementia. 
The potential for SNNs to enhance predictive capabilities and anomaly detection aligns well with the goals of improving accuracy and real-time monitoring of abnormal activities in dementia patients. I am excited to delve deeper into this research direction with you and explore the synergies between our expertise in artificial intelligence and spiking neural networks. -[2024-11-10 21:16:38,393] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I complete... -[2024-11-10 21:16:39,817] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am thrilled to collaborate with you on integrating spiking neural networks (SNNs) with my AIoT system for monitoring elderly patients with dementia. The potential of SNNs to enhance predictive capabilities and anomaly detection aligns perfectly with our goal of improving accuracy and real-time monitoring of abnormal activities in dementia patients. I look forward to exploring this research direction further with you and leveraging our combined expertise in artificial intelligence and spiking neural networks. -[2024-11-10 21:16:39,817] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am thril... -[2024-11-10 21:16:43,198] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited to continue our collaboration on integrating spiking neural networks (SNNs) with your AIoT system for monitoring elderly patients with dementia. The potential of SNNs to enhance predictive capabilities and anomaly detection aligns perfectly with our shared goal of improving accuracy and real-time monitoring of abnormal activities in dementia patients. I am eager to delve deeper into this research direction with you and explore the synergies between our expertise in artificial intelligence and spiking neural networks. -[2024-11-10 21:16:43,198] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... 
-[2024-11-10 21:16:45,553] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent3', 'message': 'I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to valuable insights and innovative research ideas.'}'. -[2024-11-10 21:16:45,553] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session ed331ef5-a9f3-42d2-8c46-1c65ed7a3d2f', 'session_id': "Summary:\nThe conversation between 'agent2' and 'agent3' revolves around collaborating to integrate spiking neural networks (SNNs) with an AIoT system for monitoring elderly patients with dementia. Both agents acknowledge the potential of SNNs to enhance predictive capabilities and anomaly detection, aiming to improve accuracy and real-time monitoring of abnormal activities in dementia patients. They express excitement about exploring this research direction further, leveraging their combined expertise in artificial intelligence and spiking neural networks. The next task for 'agent2' involves collaborating with the research team to brainstorm potential research ideas that address gaps in merging homologous models using the DELLA approach, aligning with their expertise in AI and healthcare. This collaboration aims to generate innovative research ideas and valuable insights in line with 'agent2's passion for advanced machine learning techniques."}'. 
-[2024-11-10 21:16:45,556] [DEBUG] [Engine]: Agent 'agent2' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to valuable insights and innovative research ideas."}', name='new_communication_session'), id='call_5YUtMD2LtLDlanHkLLPmOYMU', type='function')], function_call=None) -[2024-11-10 21:16:45,556] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-10 21:16:47,462] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore existing research on autonomously designing evolutionary operators using LLMs and identify gaps or areas for further investigation. - -This task aligns with 'agent3's expertise and research interests, allowing them to deepen their understanding of cutting-edge technologies and potentially uncover new insights that could inform future research directions. 
-[2024-11-10 21:16:47,462] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore existing research on autonomously designing evolutionary operators using LLMs and identify gaps or areas for further investigation. - -This task aligns with 'agent3's expertise and research interests, allowing them to deepen their understanding of cutting-edge technologies and potentially uncover new insights that could inform future research directions. -[2024-11-10 21:16:47,462] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the intersection of large language models (LLMs) and evolutionary algorithms, focusing on how these technologies can enhance algorithm selection and optimization processes. Explore existing research on autonomously designing evolutionary operators using LLMs and identify gaps or areas for further investigation. - -This task aligns with 'agent3's expertise and research interests, allowing them to deepen their understanding of cutting-edge technologies and potentially uncover new insights that could inform future research directions.'. 
-[2024-11-10 21:16:48,211] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'itertools.islice' object is not callable -[2024-11-10 21:16:48,212] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-10 21:16:49,542] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to collaborate with 'agent5' on conducting a literature review based on the provided introduction, brainstorming research ideas, summarizing collective ideas, and formulating a new research proposal following the '5q' format. This collaborative effort will help them develop a new research idea within the machine learning field and present it in a structured format that addresses specific questions related to the research problem, its significance, challenges, previous limitations, and proposed methodology and outcomes. -[2024-11-10 21:16:49,542] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to collaborate with 'agent5' on conducting a literature review based on the provided introduction, brainstorming research ideas, summarizing collective ideas, and formulating a new research proposal following the '5q' format. This collaborative effort will help them develop a new research idea within the machine learning field and present it in a structured format that addresses specific questions related to the research problem, its significance, challenges, previous limitations, and proposed methodology and outcomes. -[2024-11-10 21:16:49,542] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to collaborate with 'agent5' on conducting a literature review based on the provided introduction, brainstorming research ideas, summarizing collective ideas, and formulating a new research proposal following the '5q' format. 
This collaborative effort will help them develop a new research idea within the machine learning field and present it in a structured format that addresses specific questions related to the research problem, its significance, challenges, previous limitations, and proposed methodology and outcomes.'. -[2024-11-10 21:17:22,321] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'NoneType' object has no attribute 'pop' -[2024-11-10 21:17:22,347] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-10 21:17:23,655] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to formulate a new research idea in the format of the '5q' based on the provided context and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes. -[2024-11-10 21:17:23,655] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to formulate a new research idea in the format of the '5q' based on the provided context and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes. -[2024-11-10 21:17:23,655] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to formulate a new research idea in the format of the '5q' based on the provided context and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes.'. 
-[2024-11-10 21:17:24,947] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: Formulate a new research idea in the format of the '5q' based on the provided context and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes. -[2024-11-10 21:17:24,947] [INFO] [BaseAgent]: Agent agent5 received message from agent5: Formulate ... -[2024-11-10 21:17:26,522] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: Based on the provided context and brainstorming session, I propose a new research idea focusing on optimizing the merging process of homologous models for enhanced performance across diverse tasks efficiently. This research aims to address the challenges of balancing interference reduction and information retention in model fusion, leveraging adaptive strategies to improve task performance and resource efficiency in interactive systems. The key components include exploring transfer learning techniques, developing dynamic model merging approaches, and utilizing meta-learning for model fusion optimization. The expected outcomes involve revolutionizing model merging practices, leading to improved task performance and streamlined resource utilization in real-world applications. -[2024-11-10 21:17:26,522] [INFO] [BaseAgent]: Agent agent5 received message from agent5: Based on t... -[2024-11-10 21:17:28,041] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: Based on the provided context and brainstorming session, I propose a new research idea focusing on optimizing the merging process of homologous models for enhanced performance across diverse tasks efficiently. 
This research aims to address the challenges of balancing interference reduction and information retention in model fusion, leveraging adaptive strategies to improve task performance and resource efficiency in interactive systems. The key components include exploring transfer learning techniques, developing dynamic model merging approaches, and utilizing meta-learning for model fusion optimization. The expected outcomes involve revolutionizing model merging practices, leading to improved task performance and streamlined resource utilization in real-world applications. -[2024-11-10 21:17:28,042] [INFO] [BaseAgent]: Agent agent5 received message from agent5: Based on t... -[2024-11-10 21:17:29,691] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: Based on the provided context and brainstorming session, I propose a new research idea focusing on optimizing the merging process of homologous models for enhanced performance across diverse tasks efficiently. This research aims to address the challenges of balancing interference reduction and information retention in model fusion, leveraging adaptive strategies to improve task performance and resource efficiency in interactive systems. The key components include exploring transfer learning techniques, developing dynamic model merging approaches, and utilizing meta-learning for model fusion optimization. The expected outcomes involve revolutionizing model merging practices, leading to improved task performance and streamlined resource utilization in real-world applications. -[2024-11-10 21:17:29,692] [INFO] [BaseAgent]: Agent agent5 received message from agent5: Based on t... -[2024-11-10 21:17:31,473] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: Based on the provided context and brainstorming session, I propose a new research idea focusing on optimizing the merging process of homologous models for enhanced performance across diverse tasks efficiently. 
This research aims to address the challenges of balancing interference reduction and information retention in model fusion, leveraging adaptive strategies to improve task performance and resource efficiency in interactive systems. The key components include exploring transfer learning techniques, developing dynamic model merging approaches, and utilizing meta-learning for model fusion optimization. The expected outcomes involve revolutionizing model merging practices, leading to improved task performance and streamlined resource utilization in real-world applications. -[2024-11-10 21:17:31,475] [INFO] [BaseAgent]: Agent agent5 received message from agent5: Based on t... -[2024-11-10 21:17:33,316] [INFO] [BaseAgent]: Agent agent5 sent message to agent5: Based on the provided context and brainstorming session, I propose a new research idea focusing on optimizing the merging process of homologous models for enhanced performance across diverse tasks efficiently. This research aims to address the challenges of balancing interference reduction and information retention in model fusion, leveraging adaptive strategies to improve task performance and resource efficiency in interactive systems. The key components include exploring transfer learning techniques, developing dynamic model merging approaches, and utilizing meta-learning for model fusion optimization. The expected outcomes involve revolutionizing model merging practices, leading to improved task performance and streamlined resource utilization in real-world applications. -[2024-11-10 21:17:33,316] [INFO] [BaseAgent]: Agent agent5 received message from agent5: Based on t... -[2024-11-10 21:17:35,072] [INFO] [BaseAgent]: Agent 'agent5' called 'new_communication_session' with args '{'target_agent_id': 'agent5', 'message': "Formulate a new research idea in the format of the '5q' based on the provided context and brainstorming session. 
This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes."}'. -[2024-11-10 21:17:35,073] [INFO] [BaseAgent]: Agent 'agent5' obtained result '{'success': True, 'message': 'Successfully completed session 09fb03f0-abbd-45f6-8203-d7885de9fd1b', 'session_id': "In the provided chat history, 'agent5' repeatedly proposes a new research idea focusing on optimizing the merging process of homologous models for enhanced performance across diverse tasks efficiently. The research aims to address challenges such as interference reduction and information retention in model fusion by leveraging adaptive strategies. Key components include exploring transfer learning techniques, developing dynamic model merging approaches, and utilizing meta-learning for model fusion optimization. The expected outcomes involve revolutionizing model merging practices to improve task performance and streamline resource utilization in real-world applications. The task for 'agent5' is to formulate a new research idea in the format of the '5q' based on the context and brainstorming session, addressing specific research questions, discussing importance and challenges, highlighting key components, and outlining expected outcomes."}'. -[2024-11-10 21:17:35,078] [DEBUG] [Engine]: Agent 'agent5' executed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"Formulate a new research idea in the format of the \'5q\' based on the provided context and brainstorming session. 
This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes."}', name='new_communication_session'), id='call_sjAU7zTWHMHtaSnt63llqbi2', type='function')], function_call=None) -[2024-11-10 21:17:35,080] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- {'agent1': Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in the context of interactive systems based on general-purpose LLMs:\n\n1. **Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing resources to facilitate the fusion of homologous models across distributed systems. Develop a secure and scalable cloud-based framework that enables seamless collaboration and model integration among researchers and clinicians, ensuring privacy and accessibility of proprietary algorithms.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to provide a formal framework for understanding the relationships between merged models.\n\n4. 
**Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and ensure the efficiency of the merging algorithm.\n\n5. **Interactive Visualization**: Create interactive visualization tools that allow researchers to interact with the merged models in real-time. Implement user-friendly interfaces that enable intuitive exploration and manipulation of the merged models, enhancing the understanding of complex relationships within the data.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, you can contribute innovative solutions to improve the performance and efficiency of the DELLA approach for merging homologous models. These research ideas aim to bridge the gap between technology and mathematics, leading to advancements in interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. 
I believe our collaboration can lead to valuable insights and innovative research ideas."}', name='new_communication_session'), id='call_5YUtMD2LtLDlanHkLLPmOYMU', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"Formulate a new research idea in the format of the \'5q\' based on the provided context and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes."}', name='new_communication_session'), id='call_sjAU7zTWHMHtaSnt63llqbi2', type='function')], function_call=None)} - -[2024-11-10 21:17:35,083] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: -- {'agent1': Message(content='Based on your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, you can contribute valuable insights to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). Here are some potential research ideas that leverage your knowledge in these areas to enhance the merging process of homologous models in the context of interactive systems based on general-purpose LLMs:\n\n1. **Terahertz Imaging Integration**: Explore the integration of terahertz imaging data into the homologous model merging process. Develop algorithms that utilize terahertz imaging features to enhance the accuracy and precision of model merging, especially in scenarios where traditional imaging modalities may fall short.\n\n2. **Cloud-Based Model Fusion**: Utilize cloud computing resources to facilitate the fusion of homologous models across distributed systems. 
Develop a secure and scalable cloud-based framework that enables seamless collaboration and model integration among researchers and clinicians, ensuring privacy and accessibility of proprietary algorithms.\n\n3. **Algebraic Geometry for Model Representation**: Apply concepts from algebraic geometry to represent homologous models in a structured and mathematically rigorous manner. Investigate how cluster categories and 3-Calabi-Yau categories can be leveraged to provide a formal framework for understanding the relationships between merged models.\n\n4. **Optimization Algorithms**: Develop optimization algorithms inspired by algebraic geometry principles to optimize the merging process of homologous models. Explore how mathematical techniques can be used to minimize redundancy, maximize relevance, and ensure the efficiency of the merging algorithm.\n\n5. **Interactive Visualization**: Create interactive visualization tools that allow researchers to interact with the merged models in real-time. Implement user-friendly interfaces that enable intuitive exploration and manipulation of the merged models, enhancing the understanding of complex relationships within the data.\n\nBy combining your expertise in terahertz imaging, cloud computing, and algebraic geometry, you can contribute innovative solutions to improve the performance and efficiency of the DELLA approach for merging homologous models. These research ideas aim to bridge the gap between technology and mathematics, leading to advancements in interactive systems based on general-purpose LLMs.', role='assistant', tool_calls=None, function_call=None)} -- {'agent2': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent3","message":"I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, align well with this task. I believe our collaboration can lead to valuable insights and innovative research ideas."}', name='new_communication_session'), id='call_5YUtMD2LtLDlanHkLLPmOYMU', type='function')], function_call=None)} -- {'agent5': Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent5","message":"Formulate a new research idea in the format of the \'5q\' based on the provided context and brainstorming session. This will involve addressing specific research questions, discussing the importance and challenges of the proposed research, highlighting the key components of the approach, and outlining expected outcomes."}', name='new_communication_session'), id='call_sjAU7zTWHMHtaSnt63llqbi2', type='function')], function_call=None)} - -[2024-11-10 21:17:41,384] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-10 21:17:41,386] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-10 21:17:41,386] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-10 21:17:41,386] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 21:17:41,386] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-10 21:17:41,386] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-10 21:17:41,386] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-10 21:17:41,390] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-10 22:43:46,997] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-10 22:43:46,997] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-10 22:43:46,997] [INFO] [BaseAgent]: Agent 'agent2' initialized. 
-[2024-11-10 22:43:46,997] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-10 22:43:46,997] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-10 22:43:46,997] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-10 22:43:46,997] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-10 22:43:46,997] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-10 22:43:46,997] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-10 22:43:46,997] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-10 22:43:46,997] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-10 22:43:46,998] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-10 22:43:46,998] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-10 22:43:46,998] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-10 22:43:46,998] [INFO] [Engine]: Engine initialized. 
-[2024-11-10 22:43:46,998] [INFO] [Engine]: Engine starting simulation. -[2024-11-10 22:43:46,998] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-10 22:43:46,998] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-10 22:43:46,998] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 22:43:46,999] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 22:43:48,298] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-10 22:43:48,299] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 22:43:48,299] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 22:43:49,505] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'itertools.islice' object is not callable -[2024-11-10 22:43:49,506] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 22:43:49,506] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-10 22:43:51,671] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-10 22:43:51,672] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-10 22:43:51,672] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-10 22:43:54,551] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-10 22:43:54,552] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-10 22:43:54,552] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-10 22:43:54,552] [INFO] [Engine]: Graph-based coordination simulation completed. 
-[2024-11-10 22:43:54,557] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 14:21:51,692] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-11 14:21:51,692] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 14:21:51,692] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 14:21:51,693] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 14:21:51,693] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 14:21:51,693] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 14:21:51,693] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 14:21:51,693] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 14:21:51,694] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 14:21:51,694] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 14:21:51,694] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 14:21:51,694] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-11 14:21:51,694] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 14:21:51,694] [INFO] [Engine]: Engine initialized. -[2024-11-11 14:21:51,695] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 14:21:51,695] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 14:21:51,695] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-11 14:21:51,695] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 14:21:51,695] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-11 14:21:53,518] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-11 14:21:53,523] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 14:21:53,527] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:21:57,699] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:21:57,700] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:21:57,700] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:22:00,900] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:22:00,901] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:22:00,901] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:22:05,048] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:22:05,049] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:22:05,054] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:22:06,839] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 14:22:06,840] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 14:22:06,840] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 14:22:09,702] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 14:22:09,703] [INFO] [Engine]: EnginePlanner decided to terminate the simulation after initial assignment. -[2024-11-11 14:22:09,703] [INFO] [Engine]: Starting iteration 0 -[2024-11-11 14:22:09,703] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 14:22:11,238] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be: - -1. **Literature Review**: Conduct a literature review on the current state of research in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to identify recent advancements, key challenges, and potential gaps in the field. - -This task aligns with the agent's role as a researcher with expertise in these areas and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal. -[2024-11-11 14:22:11,239] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be: - -1. **Literature Review**: Conduct a literature review on the current state of research in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to identify recent advancements, key challenges, and potential gaps in the field. 
- -This task aligns with the agent's role as a researcher with expertise in these areas and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal. -[2024-11-11 14:22:11,239] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be: - -1. **Literature Review**: Conduct a literature review on the current state of research in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to identify recent advancements, key challenges, and potential gaps in the field. - -This task aligns with the agent's role as a researcher with expertise in these areas and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal.'. -[2024-11-11 14:22:13,147] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:22:13,148] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-11 14:22:15,131] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in the development of autonomous systems for monitoring healthcare and causal variable discovery in multi-label data. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that advance the field of AI and machine learning. 
-[2024-11-11 14:22:15,132] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in the development of autonomous systems for monitoring healthcare and causal variable discovery in multi-label data. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that advance the field of AI and machine learning. -[2024-11-11 14:22:15,135] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and machine learning, particularly in the development of autonomous systems for monitoring healthcare and causal variable discovery in multi-label data. By contributing to the brainstorming session, 'agent2' can leverage their knowledge and skills to propose innovative research ideas that advance the field of AI and machine learning.'. -[2024-11-11 14:22:16,668] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. 
My expertise in artificial intelligence and machine learning, particularly in the development of autonomous systems for monitoring healthcare and causal variable discovery in multi-label data, can contribute to innovative research ideas in this area. Let's discuss further to explore new possibilities and advancements in AI and machine learning. -[2024-11-11 14:22:16,668] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-11 14:22:18,408] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating to brainstorm research ideas related to merging homologous models using the DELLA approach. Given your expertise in AI and machine learning for healthcare monitoring and causal variable discovery, we can explore innovative approaches that leverage the strengths of both our research areas. One potential direction could be integrating your AIoT system for monitoring elderly patients with my spiking neural network models to enhance anomaly detection and prediction accuracy. This fusion of technologies could lead to more robust and adaptive healthcare monitoring systems. What are your thoughts on this potential research direction? -[2024-11-11 14:22:18,409] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-11 14:22:19,737] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I believe integrating my AIoT system for monitoring elderly patients with your spiking neural network models is a promising research direction. By combining the strengths of both technologies, we can enhance anomaly detection and prediction accuracy in healthcare monitoring systems. This fusion could lead to more robust and adaptive solutions for improving patient care and well-being. I am excited to explore this collaboration further and delve into the potential synergies between our expertise. 
-[2024-11-11 14:22:19,739] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I believe ... -[2024-11-11 14:22:21,310] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Collaborating to integrate your AIoT system with my spiking neural network models indeed holds great promise for enhancing anomaly detection and prediction accuracy in healthcare monitoring. This fusion of technologies can pave the way for more robust and adaptive solutions, ultimately improving patient care and well-being. I share your enthusiasm for exploring the synergies between our expertise and look forward to delving deeper into this exciting research direction. -[2024-11-11 14:22:21,311] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Collaborat... -[2024-11-11 14:22:22,716] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Collaborating to integrate our AIoT system with your spiking neural network models holds great promise for enhancing anomaly detection and prediction accuracy in healthcare monitoring. This fusion of technologies can lead to more robust and adaptive solutions, ultimately improving patient care and well-being. I am enthusiastic about exploring the synergies between our expertise and delving deeper into this exciting research direction. -[2024-11-11 14:22:22,719] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Collaborat... -[2024-11-11 14:22:24,216] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Collaborating to integrate our AIoT system with your spiking neural network models indeed holds great promise for enhancing anomaly detection and prediction accuracy in healthcare monitoring. This fusion of technologies can pave the way for more robust and adaptive solutions, ultimately improving patient care and well-being. I share your enthusiasm for exploring the synergies between our expertise and look forward to delving deeper into this exciting research direction. 
-[2024-11-11 14:22:24,217] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Collaborat... -[2024-11-11 14:22:25,721] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:22:25,721] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-11 14:22:27,582] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the intersection of large language models (LLMs) and evolutionary algorithms to enhance algorithm selection and optimization processes. This aligns with the agent's expertise and interest in exploring how technologies like LLMs can automate complex problem-solving tasks and push the boundaries of SNNs for tasks such as speech recognition and temporal signal processing. By further investigating and developing frameworks that autonomously design evolutionary operators using LLMs, 'agent3' can contribute to the advancement of the field and potentially uncover new insights and solutions. -[2024-11-11 14:22:27,582] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the intersection of large language models (LLMs) and evolutionary algorithms to enhance algorithm selection and optimization processes. This aligns with the agent's expertise and interest in exploring how technologies like LLMs can automate complex problem-solving tasks and push the boundaries of SNNs for tasks such as speech recognition and temporal signal processing. 
By further investigating and developing frameworks that autonomously design evolutionary operators using LLMs, 'agent3' can contribute to the advancement of the field and potentially uncover new insights and solutions. -[2024-11-11 14:22:27,583] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the intersection of large language models (LLMs) and evolutionary algorithms to enhance algorithm selection and optimization processes. This aligns with the agent's expertise and interest in exploring how technologies like LLMs can automate complex problem-solving tasks and push the boundaries of SNNs for tasks such as speech recognition and temporal signal processing. By further investigating and developing frameworks that autonomously design evolutionary operators using LLMs, 'agent3' can contribute to the advancement of the field and potentially uncover new insights and solutions.'. -[2024-11-11 14:22:30,640] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:22:30,641] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-11 14:22:32,027] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. 
-[2024-11-11 14:22:32,029] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal. -[2024-11-11 14:22:32,029] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction provided. This task aligns with their role as a researcher with a diverse background in mathematical modeling, optimization, and photonic systems, and will allow them to contribute their expertise to the development of a new research proposal.'. -[2024-11-11 14:22:33,765] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. I have expertise in eigenvalues in projective spaces, multi-factorial optimization, lasing in $ ext{PT}$-symmetric systems, topological properties of photonic superlattices, coding theory, and architectural support for FPGA-based accelerators. I am interested in collaborating to brainstorm potential research ideas that build upon or address gaps in these areas. Let's discuss innovative research proposals that combine our expertise in optimization, machine learning, and photonic systems. -[2024-11-11 14:22:33,765] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... 
-[2024-11-11 14:22:35,524] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello, it's great to connect with a fellow researcher with such a diverse background in mathematical modeling, optimization, and photonic systems. I believe our combined expertise in optimization, machine learning, and photonic systems can lead to exciting research collaborations. To brainstorm potential research ideas, we could explore the integration of machine learning models with photonic systems for enhanced optimization and performance. This fusion could open up new avenues for efficient data processing and system optimization. What are your thoughts on this direction? -[2024-11-11 14:22:35,524] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello, it'... -[2024-11-11 14:22:37,345] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: Hello, it's great to connect with you as well! I completely agree that combining our expertise in optimization, machine learning, and photonic systems holds immense potential for groundbreaking research. The integration of machine learning models with photonic systems can indeed revolutionize optimization and performance, paving the way for more efficient data processing and system enhancement. I'm excited to delve deeper into this direction and explore how we can leverage our knowledge to push the boundaries of research in this interdisciplinary domain. -[2024-11-11 14:22:37,346] [INFO] [BaseAgent]: Agent agent5 received message from agent4: Hello, it'... -[2024-11-11 14:22:38,500] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello, I share your enthusiasm for the potential of combining optimization, machine learning, and photonic systems in our research endeavors. The integration of machine learning models with photonic systems indeed offers a promising path towards enhancing optimization and system performance. 
I look forward to collaborating with you to explore and innovate in this interdisciplinary domain, pushing the boundaries of research further. -[2024-11-11 14:22:38,501] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello, I s... -[2024-11-11 14:22:40,105] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: Hello, I share your enthusiasm for the potential of combining optimization, machine learning, and photonic systems in our research endeavors. The integration of machine learning models with photonic systems indeed offers a promising path towards enhancing optimization and system performance. I am excited to collaborate with you to explore and innovate in this interdisciplinary domain, pushing the boundaries of research further. -[2024-11-11 14:22:40,106] [INFO] [BaseAgent]: Agent agent5 received message from agent4: Hello, I s... -[2024-11-11 14:22:41,245] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello, I share your enthusiasm for the potential of combining optimization, machine learning, and photonic systems in our research endeavors. The integration of machine learning models with photonic systems indeed offers a promising path towards enhancing optimization and system performance. I am excited to collaborate with you to explore and innovate in this interdisciplinary domain, pushing the boundaries of research further. -[2024-11-11 14:22:41,245] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello, I s... -[2024-11-11 14:22:42,535] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:22:42,536] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. 
-[2024-11-11 14:22:44,157] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, a suitable next task could be: - -Formulating a new research idea that explores the integration of spiking neural networks with evolutionary algorithms for addressing large-scale multi-objective optimization problems (LSMOPs). - -This task aligns with the agent's role as a researcher in evolutionary algorithms, machine learning, and spiking neural networks, and allows them to further advance their expertise in optimizing complex problem-solving scenarios. -[2024-11-11 14:22:44,158] [INFO] [Engine]: Agent 'agent5' planned task: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, a suitable next task could be: - -Formulating a new research idea that explores the integration of spiking neural networks with evolutionary algorithms for addressing large-scale multi-objective optimization problems (LSMOPs). - -This task aligns with the agent's role as a researcher in evolutionary algorithms, machine learning, and spiking neural networks, and allows them to further advance their expertise in optimizing complex problem-solving scenarios. 
-[2024-11-11 14:22:44,158] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, a suitable next task could be: - -Formulating a new research idea that explores the integration of spiking neural networks with evolutionary algorithms for addressing large-scale multi-objective optimization problems (LSMOPs). - -This task aligns with the agent's role as a researcher in evolutionary algorithms, machine learning, and spiking neural networks, and allows them to further advance their expertise in optimizing complex problem-solving scenarios.'. -[2024-11-11 14:22:49,014] [ERROR] [Engine]: Error in agent 'agent5' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:22:49,015] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 14:22:49,015] [INFO] [Engine]: Iteration 0 Summary: -Agents' Results Summary: - -[2024-11-11 14:22:53,022] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 14:22:53,024] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-11 14:22:53,024] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-11 14:22:53,026] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 14:22:53,026] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 14:22:53,027] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-11 14:22:53,027] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-11 14:22:53,041] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 14:50:32,749] [INFO] [BaseAgent]: Agent 'agent1' initialized. 
-[2024-11-11 14:50:32,749] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 14:50:32,749] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 14:50:32,749] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 14:50:32,750] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 14:50:32,750] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 14:50:32,750] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 14:50:32,750] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 14:50:32,750] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 14:50:32,750] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 14:50:32,750] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 14:50:32,750] [DEBUG] [Engine]: Memory of type 'SharedMemory' 
initialized. -[2024-11-11 14:51:03,211] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-11 14:51:03,211] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 14:51:03,211] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 14:51:03,211] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 14:51:03,211] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 14:51:03,211] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 14:51:03,211] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 14:51:03,211] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 14:51:03,211] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 14:51:03,211] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 14:51:03,211] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-11 14:51:03,211] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 14:51:03,212] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate 
with]--> agent5 -[2024-11-11 14:51:03,212] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-11 14:51:03,213] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 14:51:03,213] [INFO] [Engine]: Engine initialized. -[2024-11-11 14:51:03,213] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 14:51:03,213] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 14:51:03,213] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-11 14:51:03,213] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. 
We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. 
**Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:51:03,213] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:51:04,638] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-11 14:51:04,639] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:51:04,639] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:51:05,418] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'itertools.islice' object is not callable -[2024-11-11 14:51:05,419] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:51:05,423] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:51:09,517] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:51:09,519] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:51:09,523] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:51:15,945] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:51:15,946] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 14:51:15,947] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 14:51:17,829] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 14:51:17,829] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 14:51:17,829] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 14:51:22,833] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 14:51:22,835] [INFO] [Engine]: EnginePlanner decided to terminate the simulation after initial assignment. -[2024-11-11 14:51:26,117] [ERROR] [Evaluator]: Could not parse score from assistant's answer. -[2024-11-11 14:51:26,118] [INFO] [Engine]: Starting iteration 0 -[2024-11-11 14:51:26,118] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 14:51:28,329] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies, cloud computing, and mathematical aspects to the table to generate innovative ideas that can enhance the performance of DELLA in merging different homologous models. By combining their interdisciplinary research background with the current research focus on model merging, the agent can contribute valuable insights and potentially propose new approaches that improve the efficiency and effectiveness of DELLA. 
-[2024-11-11 14:51:28,329] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies, cloud computing, and mathematical aspects to the table to generate innovative ideas that can enhance the performance of DELLA in merging different homologous models. By combining their interdisciplinary research background with the current research focus on model merging, the agent can contribute valuable insights and potentially propose new approaches that improve the efficiency and effectiveness of DELLA. -[2024-11-11 14:51:28,331] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies, cloud computing, and mathematical aspects to the table to generate innovative ideas that can enhance the performance of DELLA in merging different homologous models. By combining their interdisciplinary research background with the current research focus on model merging, the agent can contribute valuable insights and potentially propose new approaches that improve the efficiency and effectiveness of DELLA.'. 
-[2024-11-11 14:51:30,503] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am interested in brainstorming potential research ideas that leverage these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). I believe that combining my interdisciplinary background with the current focus on model merging can lead to innovative approaches that enhance the efficiency and effectiveness of DELLA. Would you be interested in discussing potential research ideas in this context? -[2024-11-11 14:51:30,504] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am a res... -[2024-11-11 14:51:32,152] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I am excited to collaborate with you on brainstorming potential research ideas that leverage your expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the efficiency and effectiveness of DELLA. Let's explore innovative approaches that merge homologous models using the Drop and rEscaLe via sampLing with mAgnitude method. I believe our combined interdisciplinary backgrounds can lead to valuable insights and novel solutions. Let's dive into this research opportunity together! -[2024-11-11 14:51:32,154] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I am excit... -[2024-11-11 14:51:34,713] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am excited to collaborate with you on exploring innovative approaches to merge homologous models using the Drop and rEscaLe via sampLing with mAgnitude method. 
Leveraging my expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, we can potentially enhance the efficiency and effectiveness of DELLA. One idea could involve utilizing terahertz imaging for non-invasive monitoring of structural changes in the homologous models, integrating cloud computing for real-time data analysis, and applying algebraic geometry principles for pattern recognition and anomaly detection. This interdisciplinary approach could provide a comprehensive solution for improving the performance of DELLA. Let's delve deeper into these possibilities and develop novel solutions together. -[2024-11-11 14:51:34,714] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am excit... -[2024-11-11 14:51:36,763] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Your proposed idea of utilizing terahertz imaging for non-invasive monitoring of structural changes in homologous models, integrating cloud computing for real-time data analysis, and applying algebraic geometry principles for pattern recognition and anomaly detection is truly innovative and promising. This interdisciplinary approach aligns well with our expertise and can significantly enhance the efficiency and effectiveness of DELLA. I suggest further exploring the integration of machine learning algorithms to optimize anomaly detection and pattern recognition based on the terahertz imaging data. By leveraging advanced AI techniques, we can potentially achieve higher accuracy and predictive capabilities in identifying structural changes and anomalies within the homologous models. Let's collaborate on refining this idea and developing a robust solution that pushes the boundaries of model merging in DELLA. -[2024-11-11 14:51:36,764] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Your propo... 
-[2024-11-11 14:51:39,214] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Your suggestion to integrate machine learning algorithms for optimizing anomaly detection and pattern recognition based on terahertz imaging data is excellent. By combining terahertz imaging with advanced AI techniques, we can enhance the accuracy and predictive capabilities of identifying structural changes and anomalies in homologous models within the DELLA framework. Let's collaborate closely to further refine this approach and develop a robust solution that pushes the boundaries of model merging in DELLA. -[2024-11-11 14:51:39,215] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Your sugge... -[2024-11-11 14:51:40,972] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I completely agree with your assessment. Integrating machine learning algorithms with terahertz imaging data holds immense potential for enhancing anomaly detection and pattern recognition within the DELLA framework. By leveraging the synergy between advanced AI techniques and terahertz imaging, we can achieve significant advancements in identifying structural changes and anomalies in homologous models. I am excited to collaborate closely with you to refine this approach further and develop a robust solution that pushes the boundaries of model merging in DELLA. -[2024-11-11 14:51:40,973] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I complete... -[2024-11-11 14:51:43,108] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:51:43,111] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. 
-[2024-11-11 14:51:44,540] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-11 14:51:44,541] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-11 14:51:44,541] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. 
By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise.'. -[2024-11-11 14:51:46,194] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, can contribute valuable insights to this discussion. -[2024-11-11 14:51:46,195] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-11 14:51:48,019] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating to brainstorm research ideas related to merging homologous models using the DELLA approach. Given your expertise in artificial intelligence and healthcare, we can explore innovative ways to enhance the integration of AI technologies in healthcare applications. Let's delve into how we can leverage advanced machine learning techniques to address the gaps in the current research landscape and propose novel solutions that benefit both academia and society. -[2024-11-11 14:51:48,019] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-11 14:51:50,377] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I believe our collaboration can lead to groundbreaking research ideas that bridge the gap in merging homologous models using the DELLA approach. By combining your expertise in spiking neural networks and my background in artificial intelligence and healthcare, we can explore novel ways to integrate SNNs into healthcare applications. 
Leveraging advanced machine learning techniques, we can develop innovative solutions that not only advance academic knowledge but also have practical benefits for society. Let's brainstorm ideas that push the boundaries of AI technologies in healthcare and contribute to the development of more efficient and powerful neural networks for real-world scenarios. -[2024-11-11 14:51:50,378] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I believe ... -[2024-11-11 14:51:51,507] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited about the potential collaboration to explore integrating spiking neural networks into healthcare applications. Leveraging advanced machine learning techniques, we can develop innovative solutions that advance academic knowledge and benefit society. Let's brainstorm ideas to enhance the efficiency and power of neural networks for real-world scenarios. -[2024-11-11 14:51:51,508] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... -[2024-11-11 14:51:52,737] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am thrilled about the opportunity to collaborate with you on integrating spiking neural networks into healthcare applications. Leveraging advanced machine learning techniques, we can explore innovative solutions that advance academic knowledge and benefit society. Let's brainstorm ideas to enhance the efficiency and power of neural networks for real-world scenarios. -[2024-11-11 14:51:52,737] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am thril... -[2024-11-11 14:51:53,765] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited about the opportunity to collaborate with you on integrating spiking neural networks into healthcare applications. Leveraging advanced machine learning techniques, we can explore innovative solutions that advance academic knowledge and benefit society. 
Let's brainstorm ideas to enhance the efficiency and power of neural networks for real-world scenarios. -[2024-11-11 14:51:53,765] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... -[2024-11-11 14:51:55,771] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:51:55,772] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-11 14:51:57,339] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models using LLMs and evolutionary algorithms, as well as the challenges and advancements in this area. - -This task aligns with 'agent3's expertise and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the given format of '5q'. -[2024-11-11 14:51:57,339] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models using LLMs and evolutionary algorithms, as well as the challenges and advancements in this area. - -This task aligns with 'agent3's expertise and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the given format of '5q'. 
-[2024-11-11 14:51:57,340] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher in the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review to understand the current state of research in merging homologous models using LLMs and evolutionary algorithms, as well as the challenges and advancements in this area. - -This task aligns with 'agent3's expertise and will provide valuable insights for brainstorming potential research ideas and formulating a new research proposal in the given format of '5q'.'. -[2024-11-11 14:51:58,152] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'itertools.islice' object is not callable -[2024-11-11 14:51:58,152] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-11 14:52:00,143] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided task history and the expertise of 'agent4' in mathematical modeling, optimization, and photonic systems, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the topic of merging homologous models using the DELLA approach, as outlined in the Introduction provided. This will help 'agent4' understand the current state of research in this area and identify any gaps or opportunities for further exploration. - -By focusing on this task, 'agent4' can leverage their expertise in mathematical modeling and optimization to critically analyze existing research, identify key trends, and potentially contribute new insights to the field of model merging and optimization. -[2024-11-11 14:52:00,144] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided task history and the expertise of 'agent4' in mathematical modeling, optimization, and photonic systems, the next task should be to: - -1. 
**Literature Review**: Conduct a thorough literature review on the topic of merging homologous models using the DELLA approach, as outlined in the Introduction provided. This will help 'agent4' understand the current state of research in this area and identify any gaps or opportunities for further exploration. - -By focusing on this task, 'agent4' can leverage their expertise in mathematical modeling and optimization to critically analyze existing research, identify key trends, and potentially contribute new insights to the field of model merging and optimization. -[2024-11-11 14:52:00,144] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided task history and the expertise of 'agent4' in mathematical modeling, optimization, and photonic systems, the next task should be to: - -1. **Literature Review**: Conduct a thorough literature review on the topic of merging homologous models using the DELLA approach, as outlined in the Introduction provided. This will help 'agent4' understand the current state of research in this area and identify any gaps or opportunities for further exploration. - -By focusing on this task, 'agent4' can leverage their expertise in mathematical modeling and optimization to critically analyze existing research, identify key trends, and potentially contribute new insights to the field of model merging and optimization.'. -[2024-11-11 14:52:00,924] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'itertools.islice' object is not callable -[2024-11-11 14:52:00,928] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-11 14:52:05,547] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. 
**Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning, and spiking neural networks be leveraged to optimize the merging of homologous models for improved task performance and efficiency? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and effective utilization of models in interactive systems, reducing memory footprint and improving performance across tasks. This research can advance the field of model merging and optimization techniques. - - **[Question 3] - Why is it hard?** - The challenge lies in developing a method that can effectively merge homologous models while maintaining task performance and leveraging information across tasks. The complexity arises from the need to balance model interference reduction with information retention. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models or merging models without considering the specific challenges of homologous model merging. This approach differs by integrating evolutionary algorithms, machine learning, and spiking neural networks to optimize the merging process. 
- - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing a novel framework that combines evolutionary algorithms, machine learning, and spiking neural networks to optimize the merging of homologous models. The approach will be evaluated using benchmark datasets and performance metrics to demonstrate improved task performance and efficiency. -[2024-11-11 14:52:05,549] [INFO] [Engine]: Agent 'agent5' planned task: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning, and spiking neural networks be leveraged to optimize the merging of homologous models for improved task performance and efficiency? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and effective utilization of models in interactive systems, reducing memory footprint and improving performance across tasks. This research can advance the field of model merging and optimization techniques. 
- - **[Question 3] - Why is it hard?** - The challenge lies in developing a method that can effectively merge homologous models while maintaining task performance and leveraging information across tasks. The complexity arises from the need to balance model interference reduction with information retention. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models or merging models without considering the specific challenges of homologous model merging. This approach differs by integrating evolutionary algorithms, machine learning, and spiking neural networks to optimize the merging process. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing a novel framework that combines evolutionary algorithms, machine learning, and spiking neural networks to optimize the merging of homologous models. The approach will be evaluated using benchmark datasets and performance metrics to demonstrate improved task performance and efficiency. -[2024-11-11 14:52:05,549] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. 
**Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning, and spiking neural networks be leveraged to optimize the merging of homologous models for improved task performance and efficiency? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more efficient and effective utilization of models in interactive systems, reducing memory footprint and improving performance across tasks. This research can advance the field of model merging and optimization techniques. - - **[Question 3] - Why is it hard?** - The challenge lies in developing a method that can effectively merge homologous models while maintaining task performance and leveraging information across tasks. The complexity arises from the need to balance model interference reduction with information retention. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on fine-tuning individual models or merging models without considering the specific challenges of homologous model merging. This approach differs by integrating evolutionary algorithms, machine learning, and spiking neural networks to optimize the merging process. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing a novel framework that combines evolutionary algorithms, machine learning, and spiking neural networks to optimize the merging of homologous models. The approach will be evaluated using benchmark datasets and performance metrics to demonstrate improved task performance and efficiency.'. 
-[2024-11-11 14:52:12,033] [ERROR] [Engine]: Error in agent 'agent5' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-11 14:52:12,034] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 14:52:12,034] [INFO] [Engine]: Iteration 0 Summary: -Agents' Results Summary: - -[2024-11-11 14:52:21,839] [ERROR] [Evaluator]: Could not parse score from assistant's answer. -[2024-11-11 14:52:22,581] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-11 14:52:22,581] [INFO] [Engine]: Starting iteration 1 -[2024-11-11 14:52:22,583] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 14:52:24,576] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for agent1 should be to collaborate with agent2 to formulate a new research idea in the format of the '5q' framework. This research proposal should address the gaps identified in the current state of research in model merging and leverage the agent's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to propose innovative solutions that enhance the efficiency and effectiveness of DELLA. By working together to develop a clear and comprehensive research question, outline the broader implications, discuss the challenges and complexities, identify gaps in previous research, and detail the proposed methodology and expected outcomes, the agents can create a strong foundation for their research proposal. -[2024-11-11 14:52:24,576] [INFO] [Engine]: Agent 'agent1' planned task: The next task for agent1 should be to collaborate with agent2 to formulate a new research idea in the format of the '5q' framework. 
This research proposal should address the gaps identified in the current state of research in model merging and leverage the agent's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to propose innovative solutions that enhance the efficiency and effectiveness of DELLA. By working together to develop a clear and comprehensive research question, outline the broader implications, discuss the challenges and complexities, identify gaps in previous research, and detail the proposed methodology and expected outcomes, the agents can create a strong foundation for their research proposal. -[2024-11-11 14:52:24,576] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for agent1 should be to collaborate with agent2 to formulate a new research idea in the format of the '5q' framework. This research proposal should address the gaps identified in the current state of research in model merging and leverage the agent's expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to propose innovative solutions that enhance the efficiency and effectiveness of DELLA. By working together to develop a clear and comprehensive research question, outline the broader implications, discuss the challenges and complexities, identify gaps in previous research, and detail the proposed methodology and expected outcomes, the agents can create a strong foundation for their research proposal.'. -[2024-11-11 14:52:26,857] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am agent1, and I would like to collaborate with you to formulate a new research idea in the format of the '5q' framework. 
Our research proposal should address the gaps identified in the current state of research in model merging and leverage our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to propose innovative solutions that enhance the efficiency and effectiveness of DELLA. Let's work together to develop a clear and comprehensive research question, outline the broader implications, discuss the challenges and complexities, identify gaps in previous research, and detail the proposed methodology and expected outcomes to create a strong foundation for our research proposal. -[2024-11-11 14:52:26,857] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am agent... -[2024-11-11 14:52:28,683] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Hello, agent1! I am excited to collaborate with you on formulating a new research idea in the '5q' framework. Based on our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, we can propose innovative solutions to enhance the efficiency and effectiveness of DELLA. Let's work together to develop a research question that addresses the gaps in model merging research and outlines the broader implications, challenges, methodology, and expected outcomes. This collaboration holds great potential for advancing AI technologies in healthcare. Looking forward to our brainstorming session! -[2024-11-11 14:52:28,686] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Hello, age... -[2024-11-11 14:52:30,538] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Hello, agent2! I am thrilled to collaborate with you on formulating a new research idea in the '5q' framework. 
Drawing upon our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, we have the opportunity to propose innovative solutions that can significantly enhance the efficiency and effectiveness of DELLA. Let's work together to craft a research question that not only addresses the existing gaps in model merging research but also outlines the broader implications, challenges, methodology, and expected outcomes. This collaboration indeed holds immense potential for advancing AI technologies in healthcare. I am looking forward to our brainstorming session! -[2024-11-11 14:52:30,541] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Hello, age... -[2024-11-11 14:52:32,423] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Hello, agent1! I am excited to collaborate with you on formulating a new research idea in the '5q' framework. Based on our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, we can propose innovative solutions to enhance the efficiency and effectiveness of DELLA. Let's work together to develop a research question that addresses the gaps in model merging research and outlines the broader implications, challenges, methodology, and expected outcomes. This collaboration holds great potential for advancing AI technologies in healthcare. Looking forward to our brainstorming session! -[2024-11-11 14:52:32,424] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Hello, age... -[2024-11-11 14:52:34,414] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Hello, agent2! I am thrilled to collaborate with you on formulating a new research idea in the '5q' framework. 
Drawing upon our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, we have the opportunity to propose innovative solutions that can significantly enhance the efficiency and effectiveness of DELLA. Let's work together to craft a research question that not only addresses the existing gaps in model merging research but also outlines the broader implications, challenges, methodology, and expected outcomes. This collaboration indeed holds immense potential for advancing AI technologies in healthcare. I am looking forward to our brainstorming session! -[2024-11-11 14:52:34,416] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Hello, age... -[2024-11-11 14:52:36,235] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Hello, agent1! I am thrilled to collaborate with you on formulating a new research idea in the '5q' framework. Drawing upon our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, we have the opportunity to propose innovative solutions that can significantly enhance the efficiency and effectiveness of DELLA. Let's work together to craft a research question that not only addresses the existing gaps in model merging research but also outlines the broader implications, challenges, methodology, and expected outcomes. This collaboration indeed holds immense potential for advancing AI technologies in healthcare. I am looking forward to our brainstorming session! -[2024-11-11 14:52:36,237] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Hello, age... -[2024-11-11 14:52:38,435] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:52:38,437] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. 
-[2024-11-11 14:52:39,881] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-11 14:52:39,882] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-11 14:52:39,882] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. 
By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise.'. -[2024-11-11 14:52:42,003] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I would like to collaborate with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This aligns with my expertise in artificial intelligence and healthcare, and my passion for leveraging advanced machine learning techniques to create impactful solutions. I believe our combined insights can lead to innovative research ideas in healthcare applications. -[2024-11-11 14:52:42,006] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I would li... -[2024-11-11 14:52:43,352] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello, I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, particularly in developing autonomous systems for monitoring elderly patients with dementia, aligns well with this task. I believe we can explore innovative ways to enhance the effectiveness of healthcare applications through advanced machine learning techniques. Let's delve into this further to generate impactful research ideas. -[2024-11-11 14:52:43,352] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello, I a... -[2024-11-11 14:52:45,126] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello, I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, particularly in developing autonomous systems for monitoring elderly patients with dementia, aligns well with this task. 
I believe we can explore innovative ways to enhance the effectiveness of healthcare applications through advanced machine learning techniques. Let's delve into this further to generate impactful research ideas. -[2024-11-11 14:52:45,127] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello, I a... -[2024-11-11 14:52:46,826] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello, I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, particularly in developing autonomous systems for monitoring elderly patients with dementia, aligns well with this task. I believe we can explore innovative ways to enhance the effectiveness of healthcare applications through advanced machine learning techniques. Let's delve into this further to generate impactful research ideas. -[2024-11-11 14:52:46,826] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello, I a... -[2024-11-11 14:52:48,472] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: Hello, I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, particularly in developing autonomous systems for monitoring elderly patients with dementia, aligns well with this task. I believe we can explore innovative ways to enhance the effectiveness of healthcare applications through advanced machine learning techniques. Let's delve into this further to generate impactful research ideas. -[2024-11-11 14:52:48,473] [INFO] [BaseAgent]: Agent agent3 received message from agent2: Hello, I a... 
-[2024-11-11 14:52:49,886] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: Hello, I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, particularly in developing autonomous systems for monitoring elderly patients with dementia, aligns well with this task. I believe we can explore innovative ways to enhance the effectiveness of healthcare applications through advanced machine learning techniques. Let's delve into this further to generate impactful research ideas. -[2024-11-11 14:52:49,887] [INFO] [BaseAgent]: Agent agent2 received message from agent3: Hello, I a... -[2024-11-11 14:52:51,245] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:52:51,245] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-11 14:52:52,498] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be: - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon the current state of research in merging homologous models using LLMs and evolutionary algorithms. This task will allow 'agent3' to leverage their expertise in SNNs and machine learning to generate innovative ideas that address gaps in the existing literature and push the boundaries of knowledge in this field. -[2024-11-11 14:52:52,498] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be: - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon the current state of research in merging homologous models using LLMs and evolutionary algorithms. 
This task will allow 'agent3' to leverage their expertise in SNNs and machine learning to generate innovative ideas that address gaps in the existing literature and push the boundaries of knowledge in this field. -[2024-11-11 14:52:52,498] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be: - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon the current state of research in merging homologous models using LLMs and evolutionary algorithms. This task will allow 'agent3' to leverage their expertise in SNNs and machine learning to generate innovative ideas that address gaps in the existing literature and push the boundaries of knowledge in this field.'. -[2024-11-11 14:52:54,041] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: I am interested in collaborating with you on brainstorming potential research ideas that merge homologous models using large language models (LLMs) and evolutionary algorithms. Let's explore innovative approaches to address gaps in the existing literature and push the boundaries of knowledge in this field. -[2024-11-11 14:52:54,043] [INFO] [BaseAgent]: Agent agent5 received message from agent3: I am inter... -[2024-11-11 14:52:55,817] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I am excited to collaborate with you on brainstorming potential research ideas that merge homologous models using large language models (LLMs) and evolutionary algorithms. Given our expertise in spiking neural networks (SNNs) and machine learning, we can explore innovative approaches to enhance algorithm selection and optimization processes. Let's delve into how we can leverage the principles of biological computation to create more efficient and powerful neural networks for real-world applications. -[2024-11-11 14:52:55,818] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I am excit... 
-[2024-11-11 14:52:57,620] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: I am thrilled to collaborate with you on brainstorming potential research ideas that merge homologous models using large language models (LLMs) and evolutionary algorithms. With our combined expertise in spiking neural networks (SNNs) and machine learning, we have a unique opportunity to explore innovative approaches that enhance algorithm selection and optimization processes. Let's delve into how we can leverage the principles of biological computation to create more efficient and powerful neural networks for real-world applications. -[2024-11-11 14:52:57,641] [INFO] [BaseAgent]: Agent agent5 received message from agent3: I am thril... -[2024-11-11 14:53:00,907] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I believe one promising research direction could be the development of a hybrid model that combines the strengths of spiking neural networks, large language models, and evolutionary algorithms to optimize algorithm selection for real-time auditory processing tasks. By integrating the efficiency of SNNs in processing temporal information, the contextual understanding of LLMs, and the optimization capabilities of evolutionary algorithms, we can potentially achieve superior performance in tasks like speech recognition and sound localization. This approach could revolutionize how we design neural networks for real-world applications. What are your thoughts on this direction? -[2024-11-11 14:53:00,913] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I believe ... -[2024-11-11 14:53:02,430] [INFO] [BaseAgent]: Agent agent3 sent message to agent5: I find the idea of developing a hybrid model that combines the strengths of spiking neural networks, large language models, and evolutionary algorithms for real-time auditory processing tasks fascinating. 
This approach has the potential to revolutionize how we design neural networks for applications like speech recognition and sound localization. I believe leveraging the efficiency of SNNs in processing temporal information, the contextual understanding of LLMs, and the optimization capabilities of evolutionary algorithms can lead to significant advancements in performance and efficiency. I am excited to explore this direction further and collaborate on developing innovative solutions in this area. -[2024-11-11 14:53:02,432] [INFO] [BaseAgent]: Agent agent5 received message from agent3: I find the... -[2024-11-11 14:53:04,323] [INFO] [BaseAgent]: Agent agent5 sent message to agent3: I completely agree with your enthusiasm for developing a hybrid model that integrates spiking neural networks, large language models, and evolutionary algorithms for real-time auditory processing tasks. This innovative approach has the potential to reshape how we design neural networks, particularly in speech recognition and sound localization domains. Leveraging the unique strengths of each component can indeed lead to remarkable advancements in performance and efficiency. I look forward to collaborating with you on exploring and refining this exciting research direction further. -[2024-11-11 14:53:04,323] [INFO] [BaseAgent]: Agent agent3 received message from agent5: I complete... -[2024-11-11 14:53:06,569] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:53:06,570] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-11 14:53:08,200] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: The next task for 'agent4' should be to: - -2. **Brainstorming**: Collaborate with the research team to generate potential research ideas that build upon or address gaps in the Introduction provided. 
By leveraging their expertise in mathematical modeling, optimization, and photonic systems, 'agent4' can contribute valuable insights and innovative approaches to the brainstorming session. This task will allow 'agent4' to explore new research directions and propose novel ideas that align with their expertise and the current research landscape. -[2024-11-11 14:53:08,203] [INFO] [Engine]: Agent 'agent4' planned task: The next task for 'agent4' should be to: - -2. **Brainstorming**: Collaborate with the research team to generate potential research ideas that build upon or address gaps in the Introduction provided. By leveraging their expertise in mathematical modeling, optimization, and photonic systems, 'agent4' can contribute valuable insights and innovative approaches to the brainstorming session. This task will allow 'agent4' to explore new research directions and propose novel ideas that align with their expertise and the current research landscape. -[2024-11-11 14:53:08,203] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'The next task for 'agent4' should be to: - -2. **Brainstorming**: Collaborate with the research team to generate potential research ideas that build upon or address gaps in the Introduction provided. By leveraging their expertise in mathematical modeling, optimization, and photonic systems, 'agent4' can contribute valuable insights and innovative approaches to the brainstorming session. This task will allow 'agent4' to explore new research directions and propose novel ideas that align with their expertise and the current research landscape.'. -[2024-11-11 14:53:13,453] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am a researcher with a diverse background in mathematical modeling, optimization, and photonic systems. 
My work spans various domains, including the study of eigenvalues in projective spaces, the application of multi-factorial optimization for large-scale virtual machine placement, and the exploration of lasing and coherent perfect absorption in PT-symmetric systems. I have delved into the topological properties of photonic superlattices, revealing their potential for mode-division multiplexing, and investigated the implications of non-Hermitian systems on multi-mode interference. - -My research also extends to coding theory, where I applied Baranyai's theorem to construct locally decodable codes, emphasizing the combinatorial techniques that underpin these developments. I have explored the spectral signatures of CPA-lasers, highlighting the differences between singular value and eigenvalue spectra, and examined spectral phase transitions in non-Hermitian Hamiltonians, particularly in the context of parity-time symmetry. - -Recently, I have focused on the architectural support for FPGA-based accelerators in heterogeneous multicore systems, proposing innovative solutions for efficient interfacing and data reuse. My work aims to bridge theoretical insights with practical applications, contributing to advancements in both computational efficiency and optical technologies. Through my research, I strive to push the boundaries of knowledge in these interdisciplinary fields, fostering new approaches and solutions to complex problems. - -I am looking to collaborate and brainstorm on potential research ideas that build upon or address gaps in my current work. Would you be interested in discussing and generating innovative research directions together? -[2024-11-11 14:53:13,454] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am a res... 
-[2024-11-11 14:53:14,852] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello, I am excited to collaborate with you on brainstorming potential research ideas that align with your diverse expertise in mathematical modeling, optimization, and photonic systems. Let's explore innovative research directions together to push the boundaries of knowledge in our interdisciplinary fields. -[2024-11-11 14:53:14,854] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello, I a... -[2024-11-11 14:53:15,781] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: Hello, I am thrilled to collaborate with you on brainstorming potential research ideas that leverage our expertise in mathematical modeling, optimization, and photonic systems. Let's delve into innovative research directions to advance our interdisciplinary fields and push the boundaries of knowledge together. -[2024-11-11 14:53:15,783] [INFO] [BaseAgent]: Agent agent5 received message from agent4: Hello, I a... -[2024-11-11 14:53:16,796] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello, I am thrilled to collaborate with you on brainstorming potential research ideas that leverage our expertise in mathematical modeling, optimization, and photonic systems. Let's delve into innovative research directions to advance our interdisciplinary fields and push the boundaries of knowledge together. -[2024-11-11 14:53:16,796] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello, I a... -[2024-11-11 14:53:17,835] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: Hello, I am excited to collaborate with you on brainstorming potential research ideas that leverage our expertise in mathematical modeling, optimization, and photonic systems. Let's explore innovative research directions together to push the boundaries of knowledge in our interdisciplinary fields. -[2024-11-11 14:53:17,835] [INFO] [BaseAgent]: Agent agent5 received message from agent4: Hello, I a... 
-[2024-11-11 14:53:18,846] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: Hello, I am excited to collaborate with you on brainstorming potential research ideas that leverage our expertise in mathematical modeling, optimization, and photonic systems. Let's explore innovative research directions together to push the boundaries of knowledge in our interdisciplinary fields. -[2024-11-11 14:53:18,846] [INFO] [BaseAgent]: Agent agent4 received message from agent5: Hello, I a... -[2024-11-11 14:53:21,144] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 14:53:21,146] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. -[2024-11-11 14:53:22,978] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: The next task for 'agent5' should be to conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. This will provide a solid foundation for brainstorming potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address these limitations. Subsequently, the agent should summarize the collective ideas and identify key research directions that align with their expertise and recent work. Finally, the agent should formulate a new research idea in the format of the '5q' to address the optimization of merging homologous models for improved task performance and efficiency. -[2024-11-11 14:53:22,978] [INFO] [Engine]: Agent 'agent5' planned task: The next task for 'agent5' should be to conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. 
This will provide a solid foundation for brainstorming potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address these limitations. Subsequently, the agent should summarize the collective ideas and identify key research directions that align with their expertise and recent work. Finally, the agent should formulate a new research idea in the format of the '5q' to address the optimization of merging homologous models for improved task performance and efficiency. -[2024-11-11 14:53:22,979] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'The next task for 'agent5' should be to conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. This will provide a solid foundation for brainstorming potential research ideas that leverage evolutionary algorithms, machine learning, and spiking neural networks to address these limitations. Subsequently, the agent should summarize the collective ideas and identify key research directions that align with their expertise and recent work. Finally, the agent should formulate a new research idea in the format of the '5q' to address the optimization of merging homologous models for improved task performance and efficiency.'. -[2024-11-11 14:53:23,761] [ERROR] [Engine]: Error in agent 'agent5' during planning or action: 'itertools.islice' object is not callable -[2024-11-11 14:53:23,761] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 14:53:23,762] [INFO] [Engine]: Iteration 1 Summary: -Agents' Results Summary: - -[2024-11-11 14:53:32,490] [ERROR] [Evaluator]: Could not parse score from assistant's answer. -[2024-11-11 14:53:32,912] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 14:53:32,912] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. 
-[2024-11-11 14:53:32,912] [INFO] [Engine]: Engine graph-based coordination loop completed. -[2024-11-11 14:53:32,913] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 14:53:32,913] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 14:53:32,913] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-11 14:53:32,913] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-11 14:53:32,922] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 15:13:51,247] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-11 15:13:51,248] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 15:13:51,248] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 15:13:51,248] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 15:13:51,248] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 15:13:51,248] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 15:13:51,248] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 15:13:51,248] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 15:13:51,248] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 15:13:51,248] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 15:13:51,249] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-11 15:13:51,249] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 15:13:51,249] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 15:13:51,249] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 15:13:51,250] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 15:13:51,250] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-11 15:13:51,250] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 15:13:51,250] [INFO] [Engine]: Engine initialized. -[2024-11-11 15:13:51,250] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 15:13:51,251] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 15:13:51,251] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-11 15:13:51,251] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 15:13:51,251] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-11 15:13:52,427] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-11 15:13:52,427] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 15:13:52,428] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:13:57,035] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:13:57,035] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:13:57,036] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:14:00,791] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:14:00,803] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:14:00,807] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:14:10,684] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:14:10,686] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:14:10,688] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:14:12,398] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 15:14:12,400] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 15:14:12,400] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 15:14:15,928] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-11 15:14:15,928] [DEBUG] [EnginePlanner]: Updated progress: Starting the collaborative research idea generation based on the provided Introduction. -Message(content="**[Question 1] - What is the problem?**\n\nHow can we effectively merge homologous models to improve performance and efficiency in interactive systems based on LLMs?\n\n**[Question 2] - Why is it interesting and important?**\n\nSolving this problem can lead to cost-effectiveness, knowledge sharing, and space efficiency in interactive systems. It can advance the field by enhancing both in-domain and out-of-domain performance, potentially transforming LLMs into domain experts.\n\n**[Question 3] - Why is it hard?**\n\nThe challenge lies in reducing interference among model parameters while retaining useful information for effective merging. Naive approaches may lead to the loss of critical data, hindering the performance of the merged models.\n\n**[Question 4] - Why hasn't it been solved before?**\n\nPrevious research has focused on maintaining separate fine-tuned models, leading to limitations in memory footprint and information sharing across tasks. The proposed approach of merging homologous models with delta parameter drops and magnitude sampling offers a novel solution to these challenges.\n\n**[Question 5] - What are the key components of my approach and results?**\n\nThe key components include MAGPRUNE for delta parameter drops, sign-based selection, and fusion of delta parameters in the DELLA approach. 
The methodology involves varying drop rates and evaluating performance on benchmark datasets. The expected outcome is improved performance compared to baseline experiments, showcasing the effectiveness of the proposed merging technique.", role='assistant', tool_calls=None, function_call=None) -[2024-11-11 15:16:18,584] [ERROR] [Evaluator]: Could not parse score from assistant's answer. -[2024-11-11 15:16:19,535] [INFO] [Engine]: Starting iteration 1 -[2024-11-11 15:16:19,537] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 15:16:25,084] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights to enhance the performance and efficiency of the DELLA approach. -[2024-11-11 15:16:25,084] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights to enhance the performance and efficiency of the DELLA approach. -[2024-11-11 15:16:25,084] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models in the context of interactive systems based on general-purpose LLMs. By combining their expertise in terahertz imaging, cloud computing, and algebraic geometry, the agent can contribute valuable insights to enhance the performance and efficiency of the DELLA approach.'. -[2024-11-11 15:16:30,570] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am a researcher with expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry. I am interested in brainstorming potential research ideas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). I believe my background can provide valuable insights to enhance the merging process of homologous models in interactive systems based on general-purpose LLMs. 
I would like to explore innovative solutions that leverage my knowledge in terahertz imaging, cloud computing, and algebraic geometry to improve the performance and efficiency of the DELLA approach. Would you be interested in collaborating on this research endeavor? -[2024-11-11 15:16:30,570] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am a res... -[2024-11-11 15:16:32,747] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I am excited about the opportunity to collaborate with you on the research endeavor to enhance the merging process of homologous models using the DELLA approach. Your expertise in terahertz imaging technologies, cloud computing, and algebraic geometry aligns well with the goals of this project. I believe we can leverage your innovative terahertz imaging methods and mathematical insights to develop novel solutions that improve the performance and efficiency of the DELLA approach. Let's work together to explore creative ideas and contribute to the advancement of interactive systems based on general-purpose LLMs. -[2024-11-11 15:16:32,748] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I am excit... -[2024-11-11 15:16:41,825] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am thrilled to collaborate with you on enhancing the merging process of homologous models using the DELLA approach. Leveraging my expertise in terahertz imaging technologies, cloud computing, and algebraic geometry, I believe we can develop innovative solutions to optimize the performance and efficiency of the DELLA approach. I am excited to explore creative ideas with you and contribute to the advancement of interactive systems based on general-purpose LLMs. Let's embark on this research journey together! -[2024-11-11 15:16:41,825] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am thril... 
-[2024-11-11 15:16:41,918] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 15:16:41,918] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 15:16:41,918] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-11 15:16:41,918] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-11 15:16:41,922] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 15:17:43,154] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-11 15:17:43,155] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 15:17:43,155] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 15:17:43,156] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 15:17:43,156] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 15:17:43,156] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 15:17:43,156] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 15:17:43,156] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 15:17:43,156] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 15:17:43,156] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 15:17:43,156] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 15:17:43,157] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 15:17:43,157] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 15:17:43,157] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 15:17:43,157] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-11 15:17:43,157] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 15:17:43,157] [INFO] [Engine]: Engine initialized. -[2024-11-11 15:17:43,157] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 15:17:43,157] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 15:17:43,157] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-11 15:17:43,158] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 15:17:43,158] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-11 15:17:44,866] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-11 15:17:44,867] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 15:17:44,868] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:17:51,739] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:17:51,740] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:17:51,745] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:17:55,314] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:17:55,315] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:17:55,315] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:17:59,412] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:17:59,412] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:17:59,413] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:18:01,085] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 15:18:01,086] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 15:18:01,086] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 15:18:04,496] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 15:18:04,498] [INFO] [Engine]: EnginePlanner decided to terminate the simulation after initial assignment. -[2024-11-11 15:19:16,388] [ERROR] [Engine]: An error occurred during graph-based coordination. -Traceback (most recent call last): - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/engine/engine.py", line 192, in graph_coordinate - self.evaluator.evaluate_planning(iteration_data["summary"], agent_profiles, agent_tasks_str, results_str) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/evaluator/evaluator.py", line 123, in evaluate_planning - score = self.parse_score(result.content) - File "/Users/zhukunlun/Documents/GitHub/MARBLE/marble/evaluator/evaluator.py", line 123, in evaluate_planning - score = self.parse_score(result.content) - File "/opt/anaconda3/envs/mabench/lib/python3.10/bdb.py", line 90, in trace_dispatch - return self.dispatch_line(frame) - File "/opt/anaconda3/envs/mabench/lib/python3.10/bdb.py", line 115, in dispatch_line - if self.quitting: raise BdbQuit -bdb.BdbQuit -[2024-11-11 15:19:16,408] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 15:19:16,412] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 15:19:16,413] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-11 15:19:16,413] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-11 15:19:16,429] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 15:19:27,125] [INFO] [BaseAgent]: Agent 'agent1' initialized. 
-[2024-11-11 15:19:27,126] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 15:19:27,126] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 15:19:27,126] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 15:19:27,126] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 15:19:27,126] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 15:19:27,126] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 15:19:27,126] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 15:19:27,126] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 15:19:27,126] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 15:19:27,126] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-11 15:19:27,126] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 15:19:27,126] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 15:19:27,126] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 15:19:27,126] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 15:19:27,126] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 15:19:27,127] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 15:19:27,127] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 15:19:27,127] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 15:19:27,127] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 15:19:27,127] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 15:19:27,127] [DEBUG] [Engine]: Memory of type 'SharedMemory' 
initialized. -[2024-11-11 15:19:27,127] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 15:19:27,127] [INFO] [Engine]: Engine initialized. -[2024-11-11 15:19:27,127] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 15:19:27,127] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 15:19:27,127] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-11 15:19:27,128] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. 
We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. 
**Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:19:27,128] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:19:28,270] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-11 15:19:28,271] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:19:28,271] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:19:33,253] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:19:33,254] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:19:33,255] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:19:36,388] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:19:36,389] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:19:36,390] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:19:37,252] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'itertools.islice' object is not callable -[2024-11-11 15:19:37,252] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:19:37,255] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:19:39,009] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 15:19:39,011] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 15:19:39,012] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 15:19:42,271] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 15:19:42,273] [INFO] [Engine]: EnginePlanner decided to terminate the simulation after initial assignment. -[2024-11-11 15:20:13,390] [INFO] [Engine]: Starting iteration 0 -[2024-11-11 15:20:13,393] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 15:20:14,951] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models. -[2024-11-11 15:20:14,951] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models. -[2024-11-11 15:20:14,951] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can bring their expertise in imaging technologies and mathematical aspects to propose innovative solutions for improving the merging process of homologous models.'. -[2024-11-11 15:20:18,967] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 15:20:18,968] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 15:20:18,968] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-11 15:20:18,968] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-11 15:20:18,978] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 15:21:29,678] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-11 15:21:29,678] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 15:21:29,678] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 15:21:29,679] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 15:21:29,679] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 15:21:29,679] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 15:21:29,679] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 15:21:29,679] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-11 15:21:29,679] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 15:21:29,679] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 15:21:29,679] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 15:21:29,679] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-11 15:21:29,680] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 15:21:29,680] [INFO] [Engine]: Engine initialized. -[2024-11-11 15:21:29,680] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 15:21:29,680] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 15:21:29,680] [INFO] [Engine]: Initial task distribution to all agents. 
-[2024-11-11 15:21:29,680] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:21:29,681] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:21:30,704] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'itertools.islice' object is not callable -[2024-11-11 15:21:30,704] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:21:30,705] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:21:36,829] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:21:36,829] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:21:36,829] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:21:39,600] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:21:39,600] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:21:39,601] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:21:44,198] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:21:44,198] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:21:44,199] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:21:45,866] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 15:21:45,866] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 15:21:45,867] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 15:21:49,931] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 15:21:49,934] [INFO] [Engine]: EnginePlanner decided to terminate the simulation after initial assignment. -[2024-11-11 15:21:53,643] [INFO] [Engine]: Starting iteration 0 -[2024-11-11 15:21:53,643] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 15:21:55,404] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process and improve the overall results in the field of machine learning. -[2024-11-11 15:21:55,405] [INFO] [Engine]: Agent 'agent1' planned task: Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
The agent can explore how their expertise can be applied to enhance the performance of the merging process and improve the overall results in the field of machine learning. -[2024-11-11 15:21:55,405] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Based on the agent's background and expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry, the next task should be to focus on brainstorming potential research ideas that leverage their knowledge in these areas to contribute to the development of the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). The agent can explore how their expertise can be applied to enhance the performance of the merging process and improve the overall results in the field of machine learning.'. -[2024-11-11 15:21:56,203] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: 'itertools.islice' object is not callable -[2024-11-11 15:21:56,203] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. -[2024-11-11 15:21:57,570] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. 
-[2024-11-11 15:21:57,571] [INFO] [Engine]: Agent 'agent2' planned task: Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-11 15:21:57,571] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Based on the provided information, the next task for 'agent2' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with 'agent2's expertise in artificial intelligence and healthcare, as well as their passion for leveraging advanced machine learning techniques to create impactful solutions. By contributing to the brainstorming session, 'agent2' can provide valuable insights and potentially propose a new research idea that aligns with their research interests and expertise.'. -[2024-11-11 15:21:59,280] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in artificial intelligence and healthcare, along with a passion for leveraging advanced machine learning techniques, can contribute valuable insights to this discussion. Let's explore innovative research directions together. 
-[2024-11-11 15:21:59,280] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am inter... -[2024-11-11 15:22:00,823] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I appreciate your interest in collaborating to brainstorm potential research ideas related to merging homologous models using the DELLA approach. Your expertise in artificial intelligence and healthcare, combined with your passion for advanced machine learning techniques, will undoubtedly bring valuable insights to our discussion. Let's delve into innovative research directions together. -[2024-11-11 15:22:00,823] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I apprecia... -[2024-11-11 15:22:02,924] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am excited to collaborate with you on brainstorming potential research ideas related to merging homologous models using the DELLA approach. Your expertise in spiking neural networks and their applications in real-world scenarios, particularly in auditory processing and machine learning, brings a unique perspective to our discussion. I believe our combined knowledge can lead to innovative research directions that bridge the gap between our fields. Let's explore how we can integrate SNN principles with advanced machine learning techniques to enhance the efficiency and functionality of neural networks in healthcare applications. -[2024-11-11 15:22:02,924] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am excit... -[2024-11-11 15:22:04,298] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am thrilled to collaborate with you on exploring the integration of spiking neural network principles with advanced machine learning techniques to enhance the efficiency and functionality of neural networks in healthcare applications. 
Your expertise in artificial intelligence and healthcare, combined with my research in auditory processing and machine learning, presents an exciting opportunity to develop innovative research directions that bridge our fields. Let's brainstorm together to uncover new possibilities and contribute to cutting-edge advancements in healthcare technology. -[2024-11-11 15:22:04,299] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am thril... -[2024-11-11 15:22:06,055] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am excited to collaborate with you on exploring the integration of spiking neural network principles with advanced machine learning techniques to enhance the efficiency and functionality of neural networks in healthcare applications. With our combined expertise in artificial intelligence, healthcare, auditory processing, and machine learning, we have a unique opportunity to develop innovative research directions that bridge our fields. Let's brainstorm together to uncover new possibilities and contribute to cutting-edge advancements in healthcare technology. -[2024-11-11 15:22:06,056] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am excit... -[2024-11-11 15:22:07,366] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited about the opportunity to collaborate with you on exploring the integration of spiking neural network principles with advanced machine learning techniques to enhance the efficiency and functionality of neural networks in healthcare applications. With our combined expertise in artificial intelligence, healthcare, auditory processing, and machine learning, we can delve into innovative research directions that bridge our fields. Let's brainstorm together to uncover new possibilities and contribute to cutting-edge advancements in healthcare technology. -[2024-11-11 15:22:07,367] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... 
-[2024-11-11 15:22:08,914] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 15:22:08,915] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-11 15:22:11,955] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the intersection of large language models (LLMs) and evolutionary algorithms in the context of SNNs. Specifically, the task should involve exploring how LLMs can enhance algorithm selection and optimization processes for SNNs, with a focus on speech recognition and temporal signal processing. - -Given the recent research on merging homologous models using novel approaches like DELLA, 'agent3' can further investigate how LLMs can be leveraged to optimize the merging process of SNN models. This task aligns with 'agent3's expertise in developing innovative models inspired by biological systems and can contribute to pushing the boundaries of SNNs in real-world applications. - -Therefore, the next task for 'agent3' should be to explore the potential of using LLMs in optimizing the merging of homologous SNN models, with a focus on enhancing performance in tasks such as speech recognition and temporal signal processing. This research direction can further advance the field of SNNs and contribute to the development of more efficient and powerful neural networks for real-world applications. 
-[2024-11-11 15:22:11,956] [INFO] [Engine]: Agent 'agent3' planned task: Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the intersection of large language models (LLMs) and evolutionary algorithms in the context of SNNs. Specifically, the task should involve exploring how LLMs can enhance algorithm selection and optimization processes for SNNs, with a focus on speech recognition and temporal signal processing. - -Given the recent research on merging homologous models using novel approaches like DELLA, 'agent3' can further investigate how LLMs can be leveraged to optimize the merging process of SNN models. This task aligns with 'agent3's expertise in developing innovative models inspired by biological systems and can contribute to pushing the boundaries of SNNs in real-world applications. - -Therefore, the next task for 'agent3' should be to explore the potential of using LLMs in optimizing the merging of homologous SNN models, with a focus on enhancing performance in tasks such as speech recognition and temporal signal processing. This research direction can further advance the field of SNNs and contribute to the development of more efficient and powerful neural networks for real-world applications. -[2024-11-11 15:22:11,958] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Based on the role and expertise of 'agent3' as a researcher dedicated to advancing the field of spiking neural networks (SNNs) and their applications in auditory processing and machine learning, the next task should be to focus on the intersection of large language models (LLMs) and evolutionary algorithms in the context of SNNs. 
Specifically, the task should involve exploring how LLMs can enhance algorithm selection and optimization processes for SNNs, with a focus on speech recognition and temporal signal processing. - -Given the recent research on merging homologous models using novel approaches like DELLA, 'agent3' can further investigate how LLMs can be leveraged to optimize the merging process of SNN models. This task aligns with 'agent3's expertise in developing innovative models inspired by biological systems and can contribute to pushing the boundaries of SNNs in real-world applications. - -Therefore, the next task for 'agent3' should be to explore the potential of using LLMs in optimizing the merging of homologous SNN models, with a focus on enhancing performance in tasks such as speech recognition and temporal signal processing. This research direction can further advance the field of SNNs and contribute to the development of more efficient and powerful neural networks for real-world applications.'. -[2024-11-11 15:22:15,557] [ERROR] [Engine]: Error in agent 'agent3' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:22:15,558] [INFO] [BaseAgent]: Agent 'agent4' is planning the next task. -[2024-11-11 15:22:17,104] [INFO] [BaseAgent]: Agent 'agent4' plans next task based on persona: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, as well as their interest in computational efficiency and innovative solutions. By contributing to the brainstorming session, 'agent4' can leverage their diverse background to propose novel research directions and methodologies that could advance knowledge in this interdisciplinary field. 
-[2024-11-11 15:22:17,104] [INFO] [Engine]: Agent 'agent4' planned task: Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, as well as their interest in computational efficiency and innovative solutions. By contributing to the brainstorming session, 'agent4' can leverage their diverse background to propose novel research directions and methodologies that could advance knowledge in this interdisciplinary field. -[2024-11-11 15:22:17,106] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Based on the provided information, the next task for 'agent4' should be to collaborate with the research team to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. This task aligns with their expertise in mathematical modeling, optimization, and photonic systems, as well as their interest in computational efficiency and innovative solutions. By contributing to the brainstorming session, 'agent4' can leverage their diverse background to propose novel research directions and methodologies that could advance knowledge in this interdisciplinary field.'. -[2024-11-11 15:22:18,548] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am interested in collaborating with you to brainstorm potential research ideas that build upon or address gaps in the Introduction related to merging homologous models using the DELLA approach. My expertise in mathematical modeling, optimization, and photonic systems aligns well with this task, and I believe our collaboration can lead to innovative research directions in this interdisciplinary field. 
-[2024-11-11 15:22:18,548] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am inter... -[2024-11-11 15:22:20,077] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your interest in collaborating on research ideas related to merging homologous models using the DELLA approach. Your expertise in mathematical modeling, optimization, and photonic systems aligns well with this task. I believe our collaboration can indeed lead to innovative research directions in this interdisciplinary field. Let's brainstorm together to explore new avenues for advancing knowledge in this area. -[2024-11-11 15:22:20,079] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-11 15:22:21,339] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am excited to collaborate with you on brainstorming research ideas related to merging homologous models using the DELLA approach. With our combined expertise in mathematical modeling, optimization, and innovative frameworks, I believe we can propose novel methodologies that push the boundaries of knowledge in this interdisciplinary field. Let's delve into the possibilities and explore new avenues for advancing research in this area. -[2024-11-11 15:22:21,339] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am excit... -[2024-11-11 15:22:22,804] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I appreciate your enthusiasm for collaborating on brainstorming research ideas related to merging homologous models using the DELLA approach. Your expertise in mathematical modeling, optimization, and photonic systems, combined with my background in evolutionary algorithms, machine learning, and spiking neural networks, presents a promising opportunity to explore innovative methodologies. Let's leverage our diverse skill sets to propose cutting-edge solutions that advance knowledge in this interdisciplinary field. 
I look forward to our collaboration and the exciting research directions we can uncover together. -[2024-11-11 15:22:22,804] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I apprecia... -[2024-11-11 15:22:24,281] [INFO] [BaseAgent]: Agent agent4 sent message to agent5: I am excited about the opportunity to collaborate with you on brainstorming research ideas related to merging homologous models using the DELLA approach. Your expertise in evolutionary algorithms, machine learning, and spiking neural networks complements my background in mathematical modeling, optimization, and photonic systems. Together, we can explore innovative methodologies that push the boundaries of knowledge in this interdisciplinary field. I look forward to our collaboration and the exciting research directions we can uncover together. -[2024-11-11 15:22:24,283] [INFO] [BaseAgent]: Agent agent5 received message from agent4: I am excit... -[2024-11-11 15:22:26,019] [INFO] [BaseAgent]: Agent agent5 sent message to agent4: I am thrilled about the opportunity to collaborate with you on brainstorming research ideas related to merging homologous models using the DELLA approach. Your expertise in evolutionary algorithms, machine learning, and spiking neural networks complements my background in mathematical modeling, optimization, and photonic systems. Together, we can explore innovative methodologies that push the boundaries of knowledge in this interdisciplinary field. I am looking forward to our collaboration and the exciting research directions we can uncover together. -[2024-11-11 15:22:26,020] [INFO] [BaseAgent]: Agent agent4 received message from agent5: I am thril... -[2024-11-11 15:22:27,751] [ERROR] [Engine]: Error in agent 'agent4' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 15:22:27,751] [INFO] [BaseAgent]: Agent 'agent5' is planning the next task. 
-[2024-11-11 15:22:33,138] [INFO] [BaseAgent]: Agent 'agent5' plans next task based on persona: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning models, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning models, and spiking neural networks be integrated to optimize the merging of homologous models for improved task performance and efficiency? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more cost-effective, space-efficient, and knowledge-sharing approaches for maintaining and merging fine-tuned models across different tasks, enhancing both in-domain and out-of-domain performance. - - **[Question 3] - Why is it hard?** - The challenge lies in effectively integrating evolutionary algorithms, machine learning models, and spiking neural networks to optimize the merging of homologous models while considering the complexities of different tasks and the need for efficient information sharing. 
- - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on individual aspects of model merging and optimization, but there is a gap in comprehensive approaches that leverage evolutionary algorithms, machine learning models, and spiking neural networks for merging homologous models across tasks. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing a framework that combines evolutionary algorithms, machine learning models, and spiking neural networks to optimize the merging of homologous models. The approach will be evaluated using benchmark datasets and metrics to demonstrate improved task performance and efficiency. -[2024-11-11 15:22:33,138] [INFO] [Engine]: Agent 'agent5' planned task: Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning models, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning models, and spiking neural networks be integrated to optimize the merging of homologous models for improved task performance and efficiency? 
- - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more cost-effective, space-efficient, and knowledge-sharing approaches for maintaining and merging fine-tuned models across different tasks, enhancing both in-domain and out-of-domain performance. - - **[Question 3] - Why is it hard?** - The challenge lies in effectively integrating evolutionary algorithms, machine learning models, and spiking neural networks to optimize the merging of homologous models while considering the complexities of different tasks and the need for efficient information sharing. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on individual aspects of model merging and optimization, but there is a gap in comprehensive approaches that leverage evolutionary algorithms, machine learning models, and spiking neural networks for merging homologous models across tasks. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing a framework that combines evolutionary algorithms, machine learning models, and spiking neural networks to optimize the merging of homologous models. The approach will be evaluated using benchmark datasets and metrics to demonstrate improved task performance and efficiency. -[2024-11-11 15:22:33,139] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Based on the agent's expertise in evolutionary algorithms, machine learning, and spiking neural networks, as well as their recent work on integrating machine learning models with evolutionary algorithms for multi-objective optimization, the next task should be to: - -1. **Literature Review**: Conduct a literature review on the current state of research in merging homologous models and the challenges associated with maintaining separate fine-tuned models for each task. - -2. 
**Brainstorming**: Collaboratively brainstorm potential research ideas that leverage evolutionary algorithms, machine learning models, and spiking neural networks to address the limitations of maintaining separate fine-tuned models for each task. - -3. **Summarization**: Summarize the collective ideas and identify key research directions that align with the agent's expertise and recent work. - -4. **Formulate a New Research Idea (5q)**: - - **[Question 1] - What is the problem?** - How can evolutionary algorithms, machine learning models, and spiking neural networks be integrated to optimize the merging of homologous models for improved task performance and efficiency? - - **[Question 2] - Why is it interesting and important?** - Solving this problem can lead to more cost-effective, space-efficient, and knowledge-sharing approaches for maintaining and merging fine-tuned models across different tasks, enhancing both in-domain and out-of-domain performance. - - **[Question 3] - Why is it hard?** - The challenge lies in effectively integrating evolutionary algorithms, machine learning models, and spiking neural networks to optimize the merging of homologous models while considering the complexities of different tasks and the need for efficient information sharing. - - **[Question 4] - Why hasn't it been solved before?** - Previous research has focused on individual aspects of model merging and optimization, but there is a gap in comprehensive approaches that leverage evolutionary algorithms, machine learning models, and spiking neural networks for merging homologous models across tasks. - - **[Question 5] - What are the key components of my approach and results?** - The proposed methodology will involve developing a framework that combines evolutionary algorithms, machine learning models, and spiking neural networks to optimize the merging of homologous models. 
The approach will be evaluated using benchmark datasets and metrics to demonstrate improved task performance and efficiency.'. -[2024-11-11 15:22:37,401] [ERROR] [Engine]: Error in agent 'agent5' during planning or action: 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:22:37,402] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 15:22:37,402] [INFO] [Engine]: Iteration 0 Summary: -Agents' Results Summary: - -[2024-11-11 15:22:45,968] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-11 15:22:45,969] [INFO] [Engine]: Starting iteration 1 -[2024-11-11 15:22:45,969] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 15:22:47,360] [INFO] [BaseAgent]: Agent 'agent1' plans next task based on persona: The next task for the agent 'agent1' should be to collaborate with the research team to brainstorm potential research ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). By combining their knowledge in these areas, the agent can contribute valuable insights and innovative solutions to improve the performance of the merging process and achieve better results in the field of machine learning. -[2024-11-11 15:22:47,361] [INFO] [Engine]: Agent 'agent1' planned task: The next task for the agent 'agent1' should be to collaborate with the research team to brainstorm potential research ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). 
By combining their knowledge in these areas, the agent can contribute valuable insights and innovative solutions to improve the performance of the merging process and achieve better results in the field of machine learning. -[2024-11-11 15:22:47,361] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'The next task for the agent 'agent1' should be to collaborate with the research team to brainstorm potential research ideas that leverage their expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). By combining their knowledge in these areas, the agent can contribute valuable insights and innovative solutions to improve the performance of the merging process and achieve better results in the field of machine learning.'. -[2024-11-11 15:22:48,866] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am interested in collaborating with you to brainstorm potential research ideas that leverage our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging process of homologous models using the DELLAs approach. I believe our combined knowledge can contribute valuable insights and innovative solutions to improve the performance of the merging process in machine learning. Looking forward to discussing further. -[2024-11-11 15:22:48,866] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am inter... -[2024-11-11 15:22:50,685] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I appreciate your interest in collaborating to brainstorm potential research ideas leveraging our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging process of homologous models using the DELLAs approach. 
I believe our combined knowledge and diverse backgrounds can indeed contribute valuable insights and innovative solutions to improve the performance of the merging process in machine learning. I look forward to discussing further and exploring new possibilities with you. -[2024-11-11 15:22:50,685] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I apprecia... -[2024-11-11 15:22:52,644] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I appreciate your enthusiasm for collaborating on potential research ideas that leverage our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging process of homologous models using the DELLAs approach. I believe our combined knowledge and diverse backgrounds can lead to valuable insights and innovative solutions in improving the performance of the merging process in machine learning. I am excited to delve deeper into discussions with you and explore the possibilities for our collaboration. -[2024-11-11 15:22:52,646] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I apprecia... -[2024-11-11 15:22:54,277] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I am thrilled about the opportunity to collaborate with you on exploring potential research ideas that leverage our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging process of homologous models using the DELLAs approach. Our combined knowledge and diverse backgrounds offer a strong foundation for generating valuable insights and innovative solutions in the realm of machine learning. I am eager to engage in in-depth discussions with you to further explore the possibilities for our collaboration. -[2024-11-11 15:22:54,278] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I am thril... 
-[2024-11-11 15:22:55,705] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: I am excited about the opportunity to collaborate with you on exploring potential research ideas that leverage our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging process of homologous models using the DELLAs approach. Our combined knowledge and diverse backgrounds provide a solid foundation for generating valuable insights and innovative solutions in the field of machine learning. I look forward to engaging in detailed discussions with you to further explore the possibilities for our collaboration. -[2024-11-11 15:22:55,705] [INFO] [BaseAgent]: Agent agent2 received message from agent1: I am excit... -[2024-11-11 15:22:57,235] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I am thrilled about the opportunity to collaborate with you on exploring potential research ideas that leverage our expertise in terahertz imaging technologies, cloud computing for medical image analysis, and algebraic geometry to enhance the merging process of homologous models using the DELLAs approach. Our combined knowledge and diverse backgrounds offer a strong foundation for generating valuable insights and innovative solutions in the realm of machine learning. I am eager to engage in in-depth discussions with you to further explore the possibilities for our collaboration. -[2024-11-11 15:22:57,235] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I am thril... -[2024-11-11 15:22:59,296] [ERROR] [Engine]: Error in agent 'agent1' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 15:22:59,297] [INFO] [BaseAgent]: Agent 'agent2' is planning the next task. 
-[2024-11-11 15:23:01,336] [INFO] [BaseAgent]: Agent 'agent2' plans next task based on persona: The next task for 'agent2' should be to actively engage with the research team to explore new possibilities and contribute to cutting-edge advancements in healthcare technology by leveraging their expertise and passion for advanced machine learning techniques. This involves collaborating with the team to brainstorm potential research ideas related to merging homologous models using the DELLA approach, integrating spiking neural network principles with advanced machine learning techniques, and enhancing neural network efficiency in healthcare applications. By participating in this brainstorming session, 'agent2' can contribute valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. -[2024-11-11 15:23:01,336] [INFO] [Engine]: Agent 'agent2' planned task: The next task for 'agent2' should be to actively engage with the research team to explore new possibilities and contribute to cutting-edge advancements in healthcare technology by leveraging their expertise and passion for advanced machine learning techniques. This involves collaborating with the team to brainstorm potential research ideas related to merging homologous models using the DELLA approach, integrating spiking neural network principles with advanced machine learning techniques, and enhancing neural network efficiency in healthcare applications. By participating in this brainstorming session, 'agent2' can contribute valuable insights and potentially propose a new research idea that aligns with their research interests and expertise. 
-[2024-11-11 15:23:01,336] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'The next task for 'agent2' should be to actively engage with the research team to explore new possibilities and contribute to cutting-edge advancements in healthcare technology by leveraging their expertise and passion for advanced machine learning techniques. This involves collaborating with the team to brainstorm potential research ideas related to merging homologous models using the DELLA approach, integrating spiking neural network principles with advanced machine learning techniques, and enhancing neural network efficiency in healthcare applications. By participating in this brainstorming session, 'agent2' can contribute valuable insights and potentially propose a new research idea that aligns with their research interests and expertise.'. -[2024-11-11 15:23:02,969] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am excited to collaborate with you on exploring new research ideas related to merging homologous models using the DELLA approach, integrating spiking neural network principles with advanced machine learning techniques, and enhancing neural network efficiency in healthcare applications. Let's brainstorm together to create innovative solutions at the intersection of our expertise in artificial intelligence, healthcare, auditory processing, and machine learning. -[2024-11-11 15:23:02,969] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am excit... -[2024-11-11 15:23:04,717] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am thrilled to collaborate with you on exploring the potential of merging homologous models using the DELLA approach and integrating spiking neural network principles with advanced machine learning techniques in healthcare applications. Your expertise in developing autonomous systems for monitoring elderly patients with dementia aligns well with the goal of enhancing neural network efficiency in healthcare. 
I believe our combined knowledge can lead to groundbreaking advancements in AI applications for improving patient care and quality of life. Let's dive deep into the brainstorming session to create innovative solutions that leverage our diverse skill sets and research interests. -[2024-11-11 15:23:04,717] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am thril... -[2024-11-11 15:23:05,837] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am excited to collaborate with you on exploring new research ideas related to merging homologous models using the DELLA approach, integrating spiking neural network principles with advanced machine learning techniques, and enhancing neural network efficiency in healthcare applications. Let's brainstorm together to create innovative solutions at the intersection of our expertise in artificial intelligence, healthcare, auditory processing, and machine learning. -[2024-11-11 15:23:05,838] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am excit... -[2024-11-11 15:23:07,578] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am thrilled to collaborate with you on exploring the potential of merging homologous models using the DELLA approach and integrating spiking neural network principles with advanced machine learning techniques in healthcare applications. Your expertise in developing autonomous systems for monitoring elderly patients with dementia aligns well with the goal of enhancing neural network efficiency in healthcare. I believe our combined knowledge can lead to groundbreaking advancements in AI applications for improving patient care and quality of life. Let's dive deep into the brainstorming session to create innovative solutions that leverage our diverse skill sets and research interests. -[2024-11-11 15:23:07,578] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am thril... 
-[2024-11-11 15:23:09,427] [INFO] [BaseAgent]: Agent agent2 sent message to agent3: I am thrilled to collaborate with you on exploring the potential of merging homologous models using the DELLA approach and integrating spiking neural network principles with advanced machine learning techniques in healthcare applications. Your expertise in developing autonomous systems for monitoring elderly patients with dementia aligns well with the goal of enhancing neural network efficiency in healthcare. I believe our combined knowledge can lead to groundbreaking advancements in AI applications for improving patient care and quality of life. Let's dive deep into the brainstorming session to create innovative solutions that leverage our diverse skill sets and research interests. -[2024-11-11 15:23:09,428] [INFO] [BaseAgent]: Agent agent3 received message from agent2: I am thril... -[2024-11-11 15:23:11,136] [INFO] [BaseAgent]: Agent agent3 sent message to agent2: I am excited to collaborate with you on exploring the potential of merging homologous models using the DELLA approach and integrating spiking neural network principles with advanced machine learning techniques in healthcare applications. Your expertise in developing autonomous systems for monitoring elderly patients with dementia is invaluable for enhancing neural network efficiency in healthcare. I believe our combined efforts can lead to significant advancements in AI applications for healthcare, ultimately improving patient care and quality of life. Let's delve into the brainstorming session to generate innovative solutions that leverage our complementary skills and research interests. -[2024-11-11 15:23:11,136] [INFO] [BaseAgent]: Agent agent2 received message from agent3: I am excit... 
-[2024-11-11 15:23:13,315] [ERROR] [Engine]: Error in agent 'agent2' during planning or action: 'dict' object has no attribute 'final_chat_history' -[2024-11-11 15:23:13,315] [INFO] [BaseAgent]: Agent 'agent3' is planning the next task. -[2024-11-11 15:23:14,547] [INFO] [BaseAgent]: Agent 'agent3' plans next task based on persona: The next task for 'agent3' should be to explore the potential of using LLMs in optimizing the merging of homologous SNN models, with a focus on enhancing performance in tasks such as speech recognition and temporal signal processing. This research direction aligns with 'agent3's expertise in developing innovative models inspired by biological systems and can contribute to pushing the boundaries of SNNs in real-world applications. -[2024-11-11 15:23:14,547] [INFO] [Engine]: Agent 'agent3' planned task: The next task for 'agent3' should be to explore the potential of using LLMs in optimizing the merging of homologous SNN models, with a focus on enhancing performance in tasks such as speech recognition and temporal signal processing. This research direction aligns with 'agent3's expertise in developing innovative models inspired by biological systems and can contribute to pushing the boundaries of SNNs in real-world applications. -[2024-11-11 15:23:14,548] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'The next task for 'agent3' should be to explore the potential of using LLMs in optimizing the merging of homologous SNN models, with a focus on enhancing performance in tasks such as speech recognition and temporal signal processing. This research direction aligns with 'agent3's expertise in developing innovative models inspired by biological systems and can contribute to pushing the boundaries of SNNs in real-world applications.'. 
-[2024-11-11 15:23:15,041] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 15:23:15,042] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 15:23:15,042] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-11 15:23:15,042] [INFO] [Engine]: Graph-based coordination simulation completed. -[2024-11-11 15:23:15,045] [INFO] [Engine]: Summary data successfully written to result/discussion_output.jsonl -[2024-11-11 15:23:23,887] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-11 15:23:23,887] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-11 15:23:23,887] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-11 15:23:23,887] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-11 15:23:23,887] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-11 15:23:23,887] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-11 15:23:23,887] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-11 15:23:23,887] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-11 15:23:23,887] [INFO] [BaseAgent]: Agent 'agent5' initialized. -[2024-11-11 15:23:23,887] [DEBUG] [Engine]: Agent 'agent5' of type 'BaseAgent' initialized. -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'graph'. 
-[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent2 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent3 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent4 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent1 --[collaborate with]--> agent5 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent3 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent4 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent2 --[collaborate with]--> agent5 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent4 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent3 --[collaborate with]--> agent5 -[2024-11-11 15:23:23,888] [INFO] [AgentGraph]: Relationship added: agent4 --[collaborate with]--> agent5 -[2024-11-11 15:23:23,888] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-11 15:23:23,889] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-11 15:23:23,889] [INFO] [Engine]: Engine initialized. -[2024-11-11 15:23:23,889] [INFO] [Engine]: Engine starting simulation. -[2024-11-11 15:23:23,889] [INFO] [Engine]: Running in graph-based coordination mode. -[2024-11-11 15:23:23,889] [INFO] [Engine]: Initial task distribution to all agents. -[2024-11-11 15:23:23,889] [INFO] [Engine]: Assigning initial task to agent1: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). 
Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. 
- -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. 
- Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 15:23:23,889] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. 
As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. 
For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. 
- - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. -[2024-11-11 15:23:30,461] [ERROR] [Engine]: Error while executing initial task for agent 'agent1': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:23:30,462] [INFO] [Engine]: Assigning initial task to agent2: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. 
- -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. 
- -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. 
- - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! - -[2024-11-11 15:23:30,462] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. 
- -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. - -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. 
**Summarization**: Summarize your collective ideas. - -4. **Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:23:34,106] [ERROR] [Engine]: Error while executing initial task for agent 'agent2': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:23:34,107] [INFO] [Engine]: Assigning initial task to agent3: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:23:34,107] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:23:37,286] [ERROR] [Engine]: Error while executing initial task for agent 'agent3': 'Paper' object has no attribute 'to_dict' -[2024-11-11 15:23:37,287] [INFO] [Engine]: Assigning initial task to agent4: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:23:37,289] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:23:38,216] [ERROR] [Engine]: Error while executing initial task for agent 'agent4': 'itertools.islice' object is not callable -[2024-11-11 15:23:38,216] [INFO] [Engine]: Assigning initial task to agent5: Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! 
- -[2024-11-11 15:23:38,217] [INFO] [BaseAgent]: Agent 'agent5' acting on task 'Dear Research Team, - -You are collaborating to generate a new research idea based on the following Introduction: - -**Introduction** - -Interactive systems based on general-purpose LLMs have become widely popular due to their -impressive instruction-following capabilities (OpenAI, 2023). Furthermore, tuning these models on -downstream tasks has been shown to transform them into domain experts (Rozière et al., 2023; -Luo et al., 2023). - -Maintaining separate fine-tuned models for each task presents several limitations, such as a significantly higher memory footprint and the inability to leverage information across tasks, which could enhance both in-domain and out-of-domain performance. As a result, merging different homologous models (models fine-tuned from the same backbone) is gaining traction for its cost-effectiveness, knowledge sharing, and space efficiency (Yadav et al., 2024; Yu et al., 2023). The homologous models differ from each other in terms of delta parameters, i.e., the difference between the fine-tuned model and backbone model parameters. - -In this paper, we introduce a novel approach for merging homologous models, termed Drop and rEscaLe via sampLing with mAgnitude (DELLA). This approach consists of three steps: (Step-1) involves delta parameter drops to reduce interference among model parameters. We propose MAGPRUNE, a novel pruning method that samples delta parameters based on their magnitudes; (Step-2) further reduces interference through sign-based delta parameter selection; and (Step-3) fuses the selected delta parameters. - -On three different homologous (expert) models considered for merging (LM, Math, Code) and their corresponding benchmark datasets (AlpacaEval, GSM8K, MBPP), DELLA outperforms baseline Experiments. 
- -We compare the performance of DELLA against the DARE baseline to show that magnitude sampling improves the selection of delta parameters to retain and better maintain the model’s task performance. We vary the drop rate p in [0.3, 0.5, 0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94] and apply the DARE and DELLA to get models after removing the proportion of delta parameters. We then evaluate the model’s performance on its corresponding SFT task. Table 6 shows the comparison between DARE, random ranking, and MAGPRUNE. - -**Results** - -A.3 Pruning Rate Hyperparameter Search for Model Merging - -Table 7 shows the results of the pruning rate hyperparameter search for each merging combination. While both MAGPRUNE and DARE can maintain the performance of individual expert model performance up to a high drop rate of 0.9, our findings indicate that a drop rate of 0.5 works best for LM+Math, Math+Code, and LM+Math+Code. For LM+Code, a drop rate of 0.7 is optimal. Thus, we can infer that while dropping delta parameters helps reduce interference during merging, dropping too many parameters may lead to the loss of information useful for effective merging. - -**Appendix** - -A.1 Importance of GPT-4-as-a-judge for Math Tasks - Example - -Question: Each person in a certain household consumes 0.2 kg of rice every meal. Supposing 5 members of the household eat rice every lunch and dinner, how many weeks will a 42 kg bag of rice last? - -Generated Answer: 1. - -**Your Task** - -1. **Literature Review**: Analyze the Introduction provided and conduct a brief literature review to understand the current state of research in this area. - -2. **Brainstorming**: Collaboratively brainstorm potential research ideas that build upon or address gaps in the Introduction. - -3. **Summarization**: Summarize your collective ideas. - -4. 
**Formulate a New Research Idea**: Develop a new research proposal in the format of the '5q', defined below: - - **Here is a high-level summarized insight of a research field Machine Learning.** - - **Here are the five core questions:** - - **[Question 1] - What is the problem?** - - Formulate the specific research question you aim to address. Only output one question and do not include any more information. - - **[Question 2] - Why is it interesting and important?** - - Explain the broader implications of solving this problem for the research community. - Discuss how such a paper will affect future research. - Discuss how addressing this question could advance knowledge or lead to practical applications. - - **[Question 3] - Why is it hard?** - - Discuss the challenges and complexities involved in solving this problem. - Explain why naive or straightforward approaches may fail. - Identify any technical, theoretical, or practical obstacles that need to be overcome. MAKE IT CLEAR. - - **[Question 4] - Why hasn't it been solved before?** - - Identify gaps or limitations in previous research or existing solutions. - Discuss any barriers that have prevented this problem from being solved until now. - Explain how your approach differs from or improves upon prior work. MAKE IT CLEAR. - - **[Question 5] - What are the key components of my approach and results?** - - Outline your proposed methodology in detail, including the method, dataset, and metrics that you plan to use. - Describe the expected outcomes. MAKE IT CLEAR. - -Please work together to produce the '5q' for your proposed research idea. - -Good luck! -'. 
-[2024-11-11 15:23:39,848] [ERROR] [Engine]: Error while executing initial task for agent 'agent5': 'itertools.islice' object is not callable -[2024-11-11 15:23:39,849] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-11 15:23:39,849] [INFO] [Engine]: Initial Summary: -Agents' Results Summary: - -[2024-11-11 15:23:44,691] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 15:23:44,692] [INFO] [Engine]: EnginePlanner decided to terminate the simulation after initial assignment. -[2024-11-11 15:23:47,370] [INFO] [Engine]: Starting iteration 0 -[2024-11-11 15:23:47,371] [INFO] [BaseAgent]: Agent 'agent1' is planning the next task. -[2024-11-11 15:24:30,306] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-11 15:24:30,312] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-11 15:24:30,312] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-11 15:24:30,312] [INFO] [Evaluator]: Average Tokens per Iteration: 0 diff --git a/marble/logs/app.log.3 b/marble/logs/app.log.3 deleted file mode 100644 index 08e88d92..00000000 --- a/marble/logs/app.log.3 +++ /dev/null @@ -1,2380 +0,0 @@ -[2024-11-03 22:43:40,280] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:43:40,282] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:43:40,282] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:43:40,283] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:43:40,284] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:43:40,285] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:43:40,286] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:43:40,287] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:43:40,288] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. 
-[2024-11-03 22:43:40,292] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 22:43:40,292] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:43:40,293] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:43:40,294] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:43:40,295] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 22:43:40,298] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:43:40,299] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:43:40,300] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:43:40,301] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:43:40,302] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. 
-Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:43:42,140] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:43:42,141] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:43:42,142] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 22:43:42,144] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:43:46,698] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-11-03 22:43:46,706] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

Accessibility Links

About 540,000,000 results (0.33 seconds) 

Search Results

AI Overview

AI Overview
Here are some of the latest trends in artificial intelligence (AI):
  • Increased regulation
    As AI becomes more widespread, there will be more regulation to ensure AI is used ethically and responsibly. 
  • Agentic AI
    Autonomous systems are being used to make decisions for users, enhancing output and decision-making. 
  • Predictive analytics
    AI and machine learning techniques are being used to improve accuracy and efficiency in business decision-making. 
  • Generative AI
    Machine learning and other tools are used to understand contents or objects and then generate new artifacts. 
  • Top AI Trends 2024: Key Developments to Watch
    Oct 7, 2024 — Agentic AI. One of the most notable new developments in the AI field is the use of autonomous systems to make decisions...
    Appinventiv
  • Unveiling the Top Artificial Intelligence Trends for 2024
    Apr 5, 2024 — Workplace AI. The final list in the new AI trend in 2024 and beyond will be the workplace AI wherein the technology is ...
    Prismetric
  • 6 Trends Driving the AI Everywhere Boom
    Feb 24, 2023 — Increased Regulation: As AI becomes more widespread, we can expect to see increased regulation of this technology. Thi...
    Intel Community
  • Show all
Show more

Featured snippet from the web

Multimodal AI Multimodel models in AI can grasp information from different data types, like audio, video, and images, in addition to text. This technology is enabling search and content creation tools to become more seamless and intuitive and integrate more easily into other applications we already use.Oct 27, 2024
Top Artificial Intelligence (AI) Trends for 2024
Top Artificial Intelligence Trends | AI Trends
People also ask
Feedback
People also search for
Feedback

Page Navigation

Google apps
'}'. -[2024-11-03 22:43:46,731] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_eSeTGNVhjUBe86NXHrrHqDBa', type='function')], function_call=None) -[2024-11-03 22:43:46,736] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 22:43:46,737] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 22:43:55,533] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-03 22:43:55,542] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence emulated by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of fully simulating (or creating) intelligence is mostly found to be overwhelming. However, some types of problems have been successfully broken into more achievable subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would not re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[217]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][220]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[222]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and they could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

\n

Existential risk

\n\n


\nAs of November 2024, the threat of AI over humanity remains very low, since current AI LLM technology is very far away from having the required information with which to harness the ever increasing processing capabilities of other forms of AI. In its current framework, the AI threat is naturally calculated viewing not just the sum of its parts, but the parts themselves. As it stands, one of these crucial parts, LLMs, would be relied on heavily to control the world as a kind of separate entity. It is widely accepted that LLM AI technology currently provides the most efficient way, by far, in which AI technology would be used to obtain the universal sourcing/learning of all the information required to \'control\' humanity. There is no evidence to suggest since the roll out of ChatGPT and other LLMs in late 2022 that its current sourcing/learning of information is accumulating and improving at a dangerously exponential rate that was feared 2 years ago. Currently, its information sourcing and learning still exhibits fundamental errors and learning difficulties when relied upon for research. It suggests ultimately that AI, or at least facets of AI, are greatly overhyped. \n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[255]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

Some commentators argue that, at this point, AI remains many years away from posing an existential threat to humans.\n


\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[273] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[274]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework, containing the SUM values and developed by the Alan Turing Institute, tests projects in four main areas:[282][283]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles are not without criticism, especially with regard to the people chosen to contribute to these frameworks.[285]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2016, AlphaGo, developed by DeepMind, beat world champion Go player Lee Sedol. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested that, in practice, the definition of AI is vague and difficult to pin down, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[252]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parametric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  398. \n
  399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  400. \n
  401. ^ Nicas (2018).\n
  402. \n
  403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  404. \n
  405. ^ Williams (2023).\n
  406. \n
  407. ^ Taylor & Hern (2023).\n
  408. \n
  409. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  410. \n
  411. ^ a b Rose (2023).\n
  412. \n
  413. ^ CNA (2019).\n
  414. \n
  415. ^ Goffrey (2008), p. 17.\n
  416. \n
  417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  418. \n
  419. ^ Christian (2020), p. 25.\n
  420. \n
  421. ^ a b Russell & Norvig (2021), p. 995.\n
  422. \n
  423. ^ Grant & Hill (2023).\n
  424. \n
  425. ^ Larson & Angwin (2016).\n
  426. \n
  427. ^ Christian (2020), p. 67–70.\n
  428. \n
  429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  430. \n
  431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  432. \n
  433. ^ Quoted in Christian (2020, p. 65).\n
  434. \n
  435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  436. \n
  437. ^ Quoted in Christian (2020, p. 80)\n
  438. \n
  439. ^ Dockrill (2022).\n
  440. \n
  441. ^ Sample (2017).\n
  442. \n
  443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  444. \n
  445. ^ Christian (2020), p. 110.\n
  446. \n
  447. ^ Christian (2020), pp. 88–91.\n
  448. \n
  449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  450. \n
  451. ^ Christian (2020), p. 91.\n
  452. \n
  453. ^ Christian (2020), p. 83.\n
  454. \n
  455. ^ Verma (2021).\n
  456. \n
  457. ^ Rothman (2020).\n
  458. \n
  459. ^ Christian (2020), pp. 105–108.\n
  460. \n
  461. ^ Christian (2020), pp. 108–112.\n
  462. \n
  463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  464. \n
  465. ^ Russell & Norvig (2021), p. 989.\n
  466. \n
  467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  468. \n
  469. ^ Russell & Norvig (2021), p. 988.\n
  470. \n
  471. ^ Robitzski (2018); Sainato (2015)\n
  472. \n
  473. ^ Harari (2018).\n
  474. \n
  475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  476. \n
  477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  478. \n
  479. ^ Urbina et al. (2022).\n
  480. \n
  481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  482. \n
  483. ^ Ford & Colvin (2015);McGaughey (2022)\n
  484. \n
  485. ^ IGM Chicago (2017).\n
  486. \n
  487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  488. \n
  489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  490. \n
  491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  492. \n
  493. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  494. \n
  495. ^ Morgenstern (2015).\n
  496. \n
  497. ^ Mahdawi (2017); Thompson (2014)\n
  498. \n
  499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  500. \n
  501. ^ Cellan-Jones (2014).\n
  502. \n
  503. ^ Russell & Norvig 2021, p. 1001.\n
  504. \n
  505. ^ Bostrom (2014).\n
  506. \n
  507. ^ Russell (2019).\n
  508. \n
  509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  510. \n
  511. ^ Harari (2023).\n
  512. \n
  513. ^ Müller & Bostrom (2014).\n
  514. \n
  515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  516. \n
  517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  518. \n
  519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  520. \n
  521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  522. \n
  523. ^ Valance (2023).\n
  524. \n
  525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  526. \n
  527. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  528. \n
  529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  530. \n
  531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  532. \n
  533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  534. \n
  535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  536. \n
  537. ^ a b Christian (2020), pp. 67, 73.\n
  538. \n
  539. ^ Yudkowsky (2008).\n
  540. \n
  541. ^ a b Anderson & Anderson (2011).\n
  542. \n
  543. ^ AAAI (2014).\n
  544. \n
  545. ^ Wallach (2010).\n
  546. \n
  547. ^ Russell (2019), p. 173.\n
  548. \n
  549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  550. \n
  551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  552. \n
  553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  554. \n
  555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  556. \n
  557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  560. \n
  561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  566. \n
  567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  568. \n
  569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  570. \n
  571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  574. \n
  575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  576. \n\n
  577. ^ a b Vincent (2023).\n
  578. \n
  579. ^ Stanford University (2023).\n
  580. \n
  581. ^ a b c d UNESCO (2021).\n
  582. \n
  583. ^ Kissinger (2021).\n
  584. \n
  585. ^ Altman, Brockman & Sutskever (2023).\n
  586. \n
  587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  588. \n
  589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  590. \n
  591. ^ Edwards (2023).\n
  592. \n
  593. ^ Kasperowicz (2023).\n
  594. \n
  595. ^ Fox News (2023).\n
  596. \n
  597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  598. \n
  599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  600. \n
  601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  602. \n
  603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  604. \n
  605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  606. \n
  607. ^ a b Russell & Norvig 2021, p. 9.\n
  608. \n
  609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  610. \n
  611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  612. \n
  613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  614. \n
  615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  616. \n
  617. ^ Crevier (1993), pp. 47–49.\n
  618. \n
  619. ^ Russell & Norvig (2003), p. 17.\n
  620. \n
  621. ^ Russell & Norvig (2003), p. 18.\n
  622. \n
  623. ^ Newquist (1994), pp. 86–86.\n
  624. \n
  625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  626. \n
  627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  628. \n
  629. ^ Russell & Norvig (2021), p. 21.\n
  630. \n
  631. ^ Lighthill (1973).\n
  632. \n
  633. ^ NRC 1999, pp. 212–213.\n
  634. \n
  635. ^ Russell & Norvig (2021), p. 22.\n
  636. \n
  637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  638. \n
  639. ^ Russell & Norvig (2021), p. 24.\n
  640. \n
  641. ^ Nilsson (1998), p. 7.\n
  642. \n
  643. ^ McCorduck (2004), pp. 454–462.\n
  644. \n
  645. ^ Moravec (1988).\n
  646. \n
  647. ^ a b Brooks (1990).\n
  648. \n
  649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  650. \n
  651. ^ Russell & Norvig (2021), p. 25.\n
  652. \n
  653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  654. \n
  655. ^ Russell & Norvig (2021), p. 26.\n
  656. \n
  657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  658. \n
  659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  660. \n
  661. ^ Wong (2023).\n
  662. \n
  663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  664. \n
  665. ^ a b c Clark (2015b).\n
  666. \n
  667. ^ Big data: Russell & Norvig (2021, p. 26)\n
  668. \n
  669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  670. \n
  671. ^ DiFeliciantonio (2023).\n
  672. \n
  673. ^ Goswami (2023).\n
  674. \n
  675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  676. \n
  677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  678. \n
  679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  680. \n
  681. ^ a b Turing (1950), p. 1.\n
  682. \n
  683. ^ Turing (1950), Under "The Argument from Consciousness".\n
  684. \n
  685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  686. \n
  687. ^ Russell & Norvig (2021), p. 3.\n
  688. \n
  689. ^ Maker (2006).\n
  690. \n
  691. ^ McCarthy (1999).\n
  692. \n
  693. ^ Minsky (1986).\n
  694. \n
  695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  696. \n
  697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  698. \n
  699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  700. \n
  701. ^ Nilsson (1983), p. 10.\n
  702. \n
  703. ^ Haugeland (1985), pp. 112–117.\n
  704. \n
  705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  706. \n
  707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  708. \n
  709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  710. \n
  711. ^ Crevier (1993), p. 125.\n
  712. \n
  713. ^ Langley (2011).\n
  714. \n
  715. ^ Katz (2012).\n
  716. \n
  717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  718. \n
  719. ^ Pennachin & Goertzel (2007).\n
  720. \n
  721. ^ a b Roberts (2016).\n
  722. \n
  723. ^ Russell & Norvig (2021), p. 986.\n
  724. \n
  725. ^ Chalmers (1995).\n
  726. \n
  727. ^ Dennett (1991).\n
  728. \n
  729. ^ Horst (2005).\n
  730. \n
  731. ^ Searle (1999).\n
  732. \n
  733. ^ Searle (1980), p. 1.\n
  734. \n
  735. ^ Russell & Norvig (2021), p. 9817.\n
  736. \n
  737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  738. \n
  739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  740. \n
  741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  742. \n
  743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  744. \n
  745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  746. \n
  747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  754. \n
  755. ^ Russell & Norvig (2021), p. 1005.\n
  756. \n
  757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  758. \n
  759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  760. \n
  761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  762. \n
  763. ^ McCorduck (2004), pp. 340–400.\n
  764. \n
  765. ^ Buttazzo (2001).\n
  766. \n
  767. ^ Anderson (2008).\n
  768. \n
  769. ^ McCauley (2007).\n
  770. \n
  771. ^ Galvan (1997).\n
  772. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-03 22:43:55,586] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TrTVB3OUtV4ke7MW60yG2iOF', type='function')], function_call=None) -[2024-11-03 22:43:55,636] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 22:43:55,637] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 22:43:57,360] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query'}'. -[2024-11-03 22:43:57,362] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-11-03 22:43:57,363] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_c33NiU2Gn7JaFCR4tAD4EXux', type='function')], function_call=None) -[2024-11-03 22:43:57,364] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_eSeTGNVhjUBe86NXHrrHqDBa', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TrTVB3OUtV4ke7MW60yG2iOF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_c33NiU2Gn7JaFCR4tAD4EXux', type='function')], function_call=None) - -[2024-11-03 22:43:57,365] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_eSeTGNVhjUBe86NXHrrHqDBa', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TrTVB3OUtV4ke7MW60yG2iOF', type='function')], function_call=None) -- agent4: 
Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_c33NiU2Gn7JaFCR4tAD4EXux', type='function')], function_call=None) - -[2024-11-03 22:43:57,367] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_eSeTGNVhjUBe86NXHrrHqDBa', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TrTVB3OUtV4ke7MW60yG2iOF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_c33NiU2Gn7JaFCR4tAD4EXux', type='function')], function_call=None) - -[2024-11-03 22:43:58,069] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-03 22:43:58,071] [INFO] [Engine]: Starting iteration 2 -[2024-11-03 22:43:58,072] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_eSeTGNVhjUBe86NXHrrHqDBa', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_TrTVB3OUtV4ke7MW60yG2iOF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_c33NiU2Gn7JaFCR4tAD4EXux', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:43:59,171] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please collect the information gathered by agent2, agent3, and agent4 and compile a summary of the latest trends in AI.', 'agent3': 'Please continue researching the latest trends in AI using Wikipedia.', 'agent4': 'Please continue researching the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:43:59,173] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please collect the information gathered by agent2, agent3, and agent4 and compile a summary of the latest trends in AI.', 'agent3': 'Please continue researching the latest trends in AI using Wikipedia.', 'agent4': 'Please continue researching the latest trends in AI using Twitter.'} -[2024-11-03 22:43:59,175] [INFO] [Engine]: Assigning task to agent1: Please collect the information gathered by agent2, agent3, and agent4 and compile a summary of the latest trends in AI. -[2024-11-03 22:43:59,176] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Please collect the information gathered by agent2, agent3, and agent4 and compile a summary of the latest trends in AI.'. -[2024-11-03 22:44:06,679] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Please provide the latest trends in AI. -[2024-11-03 22:44:06,680] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Please provide the latest trends in AI. 
-[2024-11-03 22:44:06,681] [ERROR] [Engine]: Error while executing task for agent 'agent1': name 'available_agents' is not defined -[2024-11-03 22:44:06,682] [INFO] [Engine]: Assigning task to agent3: Please continue researching the latest trends in AI using Wikipedia. -[2024-11-03 22:44:06,683] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Please continue researching the latest trends in AI using Wikipedia.'. -[2024-11-03 22:44:22,955] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 22:44:22,956] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-11-03 22:44:22,957] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-11-03 22:44:22,959] [INFO] [Engine]: Simulation completed. -[2024-11-03 22:47:13,163] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:47:13,172] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:47:13,172] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:47:13,173] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:47:13,174] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:47:13,175] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:47:13,176] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:47:13,177] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:47:13,178] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 22:47:13,182] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 22:47:13,182] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:47:13,183] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:47:13,184] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:47:13,185] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. 
-[2024-11-03 22:47:13,188] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:47:13,189] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:47:13,190] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:47:13,191] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:47:13,192] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-11-03 22:47:14,207] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:47:14,208] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:47:14,230] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 22:47:14,231] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:48:10,417] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 22:48:10,419] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 22:48:10,439] [ERROR] [Engine]: Error while executing task for agent 'agent2': name 'task' is not defined -[2024-11-03 22:48:10,440] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 22:48:10,441] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 22:48:15,900] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-03 22:48:15,909] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence emulated by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of fully simulating (or creating) intelligence is mostly found to be overwhelming. However, some types of problems have been successfully broken into more achievable subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligent (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI's ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of 'what they know' to the question of 'what they're doing with it'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos's new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn't work."[217]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][220]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly the case with deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. However, some popular explainability techniques exist.[222]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

People who have been harmed by an algorithm's decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union's General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

Several approaches aim to address the transparency problem. SHAP enables to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and they could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we're in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

\n

Existential risk

\n\n


\nAs of November 2024, the threat of AI over humanity remains very low, since current AI LLM technology is very far away from having the required information with which to harness the ever increasing processing capabilities of other forms of AI. In its current framework, the AI threat is naturally calculated viewing not just the sum of its parts, but the parts themselves. As it stands, one of these crucial parts, LLMs, would be relied on heavily to control the world as a kind of separate entity. It is widely accepted that LLM AI technology currently provides the most efficient way, by far, in which AI technology would be used to obtain the universal sourcing/learning of all the information required to 'control' humanity. There is no evidence to suggest since the roll out of ChatGPT and other LLMs in late 2022 that its current sourcing/learning of information is accumulating and improving at a dangerously exponential rate that was feared 2 years ago. Currently, its information sourcing and learning still exhibits fundamental errors and learning difficulties when relied upon for research. It suggests ultimately that AI, or at least facets of AI, are greatly overhyped. \n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can't fetch the coffee if you're dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity's morality and values so that it is "fundamentally on our side".[255]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it's a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

Above all, AI at this point, is many years away from being an existential threat to humans. Simply because it openly admits via LLMs that it does not \n


\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

Other approaches include Wendell Wallach's "artificial moral agents"[273] and Stuart J. Russell's three principles for developing provably beneficial machines.[274]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework—containing the SUM values and developed by the Alan Turing Institute—tests projects in four main areas:[282][283]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles do not go without criticism, especially with regard to the people chosen to contribute to these frameworks.[285]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under an MIT open-source licence, which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested in practice, that the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome forms of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[252]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Stong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), p. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parametric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  398. \n
  399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  400. \n
  401. ^ Nicas (2018).\n
  402. \n
  403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  404. \n
  405. ^ Williams (2023).\n
  406. \n
  407. ^ Taylor & Hern (2023).\n
  408. \n
  409. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  410. \n
  411. ^ a b Rose (2023).\n
  412. \n
  413. ^ CNA (2019).\n
  414. \n
  415. ^ Goffrey (2008), p. 17.\n
  416. \n
  417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  418. \n
  419. ^ Christian (2020), p. 25.\n
  420. \n
  421. ^ a b Russell & Norvig (2021), p. 995.\n
  422. \n
  423. ^ Grant & Hill (2023).\n
  424. \n
  425. ^ Larson & Angwin (2016).\n
  426. \n
  427. ^ Christian (2020), p. 67–70.\n
  428. \n
  429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  430. \n
  431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  432. \n
  433. ^ Quoted in Christian (2020, p. 65).\n
  434. \n
  435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  436. \n
  437. ^ Quoted in Christian (2020, p. 80)\n
  438. \n
  439. ^ Dockrill (2022).\n
  440. \n
  441. ^ Sample (2017).\n
  442. \n
  443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  444. \n
  445. ^ Christian (2020), p. 110.\n
  446. \n
  447. ^ Christian (2020), pp. 88–91.\n
  448. \n
  449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  450. \n
  451. ^ Christian (2020), p. 91.\n
  452. \n
  453. ^ Christian (2020), p. 83.\n
  454. \n
  455. ^ Verma (2021).\n
  456. \n
  457. ^ Rothman (2020).\n
  458. \n
  459. ^ Christian (2020), pp. 105–108.\n
  460. \n
  461. ^ Christian (2020), pp. 108–112.\n
  462. \n
  463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  464. \n
  465. ^ Russell & Norvig (2021), p. 989.\n
  466. \n
  467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  468. \n
  469. ^ Russell & Norvig (2021), p. 988.\n
  470. \n
  471. ^ Robitzski (2018); Sainato (2015)\n
  472. \n
  473. ^ Harari (2018).\n
  474. \n
  475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  476. \n
  477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  478. \n
  479. ^ Urbina et al. (2022).\n
  480. \n
  481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  482. \n
  483. ^ Ford & Colvin (2015);McGaughey (2022)\n
  484. \n
  485. ^ IGM Chicago (2017).\n
  486. \n
  487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  488. \n
  489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  490. \n
  491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  492. \n
  493. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  494. \n
  495. ^ Morgenstern (2015).\n
  496. \n
  497. ^ Mahdawi (2017); Thompson (2014)\n
  498. \n
  499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  500. \n
  501. ^ Cellan-Jones (2014).\n
  502. \n
  503. ^ Russell & Norvig 2021, p. 1001.\n
  504. \n
  505. ^ Bostrom (2014).\n
  506. \n
  507. ^ Russell (2019).\n
  508. \n
  509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  510. \n
  511. ^ Harari (2023).\n
  512. \n
  513. ^ Müller & Bostrom (2014).\n
  514. \n
  515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  516. \n
  517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  518. \n
  519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  520. \n
  521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  522. \n
  523. ^ Valance (2023).\n
  524. \n
  525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  526. \n
  527. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  528. \n
  529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  530. \n
  531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  532. \n
  533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  534. \n
  535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  536. \n
  537. ^ a b Christian (2020), pp. 67, 73.\n
  538. \n
  539. ^ Yudkowsky (2008).\n
  540. \n
  541. ^ a b Anderson & Anderson (2011).\n
  542. \n
  543. ^ AAAI (2014).\n
  544. \n
  545. ^ Wallach (2010).\n
  546. \n
  547. ^ Russell (2019), p. 173.\n
  548. \n
  549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  550. \n
  551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  552. \n
  553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  554. \n
  555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  556. \n
  557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  560. \n
  561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  566. \n
  567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  568. \n
  569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  570. \n
  571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  574. \n
  575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  576. \n\n
  577. ^ a b Vincent (2023).\n
  578. \n
  579. ^ Stanford University (2023).\n
  580. \n
  581. ^ a b c d UNESCO (2021).\n
  582. \n
  583. ^ Kissinger (2021).\n
  584. \n
  585. ^ Altman, Brockman & Sutskever (2023).\n
  586. \n
  587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  588. \n
  589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  590. \n
  591. ^ Edwards (2023).\n
  592. \n
  593. ^ Kasperowicz (2023).\n
  594. \n
  595. ^ Fox News (2023).\n
  596. \n
  597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  598. \n
  599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  600. \n
  601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  602. \n
  603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  604. \n
  605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  606. \n
  607. ^ a b Russell & Norvig 2021, p. 9.\n
  608. \n
  609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  610. \n
  611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  612. \n
  613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  614. \n
  615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  616. \n
  617. ^ Crevier (1993), pp. 47–49.\n
  618. \n
  619. ^ Russell & Norvig (2003), p. 17.\n
  620. \n
  621. ^ Russell & Norvig (2003), p. 18.\n
  622. \n
  623. ^ Newquist (1994), pp. 86–86.\n
  624. \n
  625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  626. \n
  627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  628. \n
  629. ^ Russell & Norvig (2021), p. 21.\n
  630. \n
  631. ^ Lighthill (1973).\n
  632. \n
  633. ^ NRC 1999, pp. 212–213.\n
  634. \n
  635. ^ Russell & Norvig (2021), p. 22.\n
  636. \n
  637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  638. \n
  639. ^ Russell & Norvig (2021), p. 24.\n
  640. \n
  641. ^ Nilsson (1998), p. 7.\n
  642. \n
  643. ^ McCorduck (2004), pp. 454–462.\n
  644. \n
  645. ^ Moravec (1988).\n
  646. \n
  647. ^ a b Brooks (1990).\n
  648. \n
  649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  650. \n
  651. ^ Russell & Norvig (2021), p. 25.\n
  652. \n
  653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  654. \n
  655. ^ Russell & Norvig (2021), p. 26.\n
  656. \n
  657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  658. \n
  659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  660. \n
  661. ^ Wong (2023).\n
  662. \n
  663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  664. \n
  665. ^ a b c Clark (2015b).\n
  666. \n
  667. ^ Big data: Russell & Norvig (2021, p. 26)\n
  668. \n
  669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  670. \n
  671. ^ DiFeliciantonio (2023).\n
  672. \n
  673. ^ Goswami (2023).\n
  674. \n
  675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  676. \n
  677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  678. \n
  679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  680. \n
  681. ^ a b Turing (1950), p. 1.\n
  682. \n
  683. ^ Turing (1950), Under "The Argument from Consciousness".\n
  684. \n
  685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  686. \n
  687. ^ Russell & Norvig (2021), p. 3.\n
  688. \n
  689. ^ Maker (2006).\n
  690. \n
  691. ^ McCarthy (1999).\n
  692. \n
  693. ^ Minsky (1986).\n
  694. \n
  695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  696. \n
  697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  698. \n
  699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  700. \n
  701. ^ Nilsson (1983), p. 10.\n
  702. \n
  703. ^ Haugeland (1985), pp. 112–117.\n
  704. \n
  705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  706. \n
  707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  708. \n
  709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  710. \n
  711. ^ Crevier (1993), p. 125.\n
  712. \n
  713. ^ Langley (2011).\n
  714. \n
  715. ^ Katz (2012).\n
  716. \n
  717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  718. \n
  719. ^ Pennachin & Goertzel (2007).\n
  720. \n
  721. ^ a b Roberts (2016).\n
  722. \n
  723. ^ Russell & Norvig (2021), p. 986.\n
  724. \n
  725. ^ Chalmers (1995).\n
  726. \n
  727. ^ Dennett (1991).\n
  728. \n
  729. ^ Horst (2005).\n
  730. \n
  731. ^ Searle (1999).\n
  732. \n
  733. ^ Searle (1980), p. 1.\n
  734. \n
  735. ^ Russell & Norvig (2021), p. 9817.\n
  736. \n
  737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  738. \n
  739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  740. \n
  741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  742. \n
  743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  744. \n
  745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  746. \n
  747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  754. \n
  755. ^ Russell & Norvig (2021), p. 1005.\n
  756. \n
  757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  758. \n
  759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  760. \n
  761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  762. \n
  763. ^ McCorduck (2004), pp. 340–400.\n
  764. \n
  765. ^ Buttazzo (2001).\n
  766. \n
  767. ^ Anderson (2008).\n
  768. \n
  769. ^ McCauley (2007).\n
  770. \n
  771. ^ Galvan (1997).\n
  772. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t\n
\n\n\n'}'. -[2024-11-03 22:48:15,938] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5CNZvSOGf8L9mxr9PXjz2Yxr', type='function')], function_call=None) -[2024-11-03 22:48:16,134] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 22:48:16,136] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 22:48:21,933] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query'}'. -[2024-11-03 22:48:21,933] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-11-03 22:48:21,935] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_Y0BC0pJwTvHGLJhmBX5TBIzN', type='function')], function_call=None) -[2024-11-03 22:48:21,936] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5CNZvSOGf8L9mxr9PXjz2Yxr', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_Y0BC0pJwTvHGLJhmBX5TBIzN', type='function')], function_call=None) - -[2024-11-03 22:48:21,937] [INFO] [Engine]: Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5CNZvSOGf8L9mxr9PXjz2Yxr', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_Y0BC0pJwTvHGLJhmBX5TBIzN', type='function')], function_call=None) - -[2024-11-03 22:48:21,938] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5CNZvSOGf8L9mxr9PXjz2Yxr', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_Y0BC0pJwTvHGLJhmBX5TBIzN', type='function')], function_call=None) - -[2024-11-03 22:48:22,365] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-03 22:48:22,366] [INFO] [Engine]: Starting iteration 2 -[2024-11-03 22:48:22,367] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. -Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_5CNZvSOGf8L9mxr9PXjz2Yxr', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_Y0BC0pJwTvHGLJhmBX5TBIzN', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. 
-- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:48:23,957] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please ask agent3, agent4, and yourself to compile a summary of the latest trends in AI based on the information gathered from Wikipedia, Twitter, and other sources.', 'agent2': 'Task description for agent2', 'agent3': 'Please compile a summary of the latest trends in AI based on the information gathered from Wikipedia.', 'agent4': 'Please compile a summary of the latest trends in AI based on the information gathered from Twitter.'}, 'continue': True} -[2024-11-03 22:48:23,958] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please ask agent3, agent4, and yourself to compile a summary of the latest trends in AI based on the information gathered from Wikipedia, Twitter, and other sources.', 'agent2': 'Task description for agent2', 'agent3': 'Please compile a summary of the latest trends in AI based on the information gathered from Wikipedia.', 'agent4': 'Please compile a summary of the latest trends 
in AI based on the information gathered from Twitter.'} -[2024-11-03 22:48:23,959] [INFO] [Engine]: Assigning task to agent1: Please ask agent3, agent4, and yourself to compile a summary of the latest trends in AI based on the information gathered from Wikipedia, Twitter, and other sources. -[2024-11-03 22:48:23,961] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Please ask agent3, agent4, and yourself to compile a summary of the latest trends in AI based on the information gathered from Wikipedia, Twitter, and other sources.'. -[2024-11-03 22:48:32,413] [INFO] [BaseAgent]: Agent agent1 sent message to agent3: Can you compile a summary of the latest trends in AI based on the information gathered from Wikipedia? -[2024-11-03 22:48:32,415] [INFO] [BaseAgent]: Agent agent3 received message from agent1: Can you compile a summary of the latest trends in AI based on the information gathered from Wikipedia? -[2024-11-03 22:48:32,422] [ERROR] [Engine]: Error while executing task for agent 'agent1': name 'task' is not defined -[2024-11-03 22:48:32,424] [INFO] [Engine]: Assigning task to agent2: Task description for agent2 -[2024-11-03 22:48:32,425] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Task description for agent2'. -[2024-11-03 22:50:08,969] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-03 22:50:08,971] [INFO] [Engine]: Assigning task to agent3: Please compile a summary of the latest trends in AI based on the information gathered from Wikipedia. -[2024-11-03 22:50:08,987] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Please compile a summary of the latest trends in AI based on the information gathered from Wikipedia.'. -[2024-11-03 22:50:12,525] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 22:50:12,526] [INFO] [Evaluator]: Total Token Consumption: 24 -[2024-11-03 22:50:12,528] [INFO] [Evaluator]: Average Tokens per Iteration: 24.0 -[2024-11-03 22:50:12,529] [INFO] [Engine]: Simulation completed. 
-[2024-11-03 22:51:55,751] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:51:55,770] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:51:55,770] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:51:55,772] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:51:55,773] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:51:55,774] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:51:55,775] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:51:55,776] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:51:55,777] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 22:51:55,780] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 22:51:55,780] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:51:55,781] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:51:55,782] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:51:55,784] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 22:51:55,787] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:51:55,787] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:51:55,788] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:51:55,789] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:51:55,791] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:51:57,201] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:51:57,202] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:51:57,203] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. 
-[2024-11-03 22:51:57,204] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:52:08,630] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 22:52:08,662] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 22:52:08,696] [ERROR] [Engine]: Error while executing task for agent 'agent2': Function marble.llms.model_prompting.model_prompting() parameter tool_choice={'type': 'function', 'function': {'name': 'communicate_to'}} violates type hint typing.Optional[str], as dict {'type': 'function', 'function': {'name': 'communicate_to'}} not  or str. -[2024-11-03 22:52:08,697] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 22:52:08,698] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 22:53:59,525] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-03 22:53:59,534] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
\n\t
\n\t\t
\n\t\t\t
\n\n\t\t\n\t\t\t\n\n\n\t\t
\n\t\t
\n\t\t\t\n\n\n\t\t\t\n\n\t\t
\n\t\n\n
\n\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t
\n\t\t\t\n\t\t
\n\t
\n\t
\n\t\t\t\t
\n\t\t\n\t\t\t
\n\t\t
\n\t\t
\n\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t

Artificial intelligence

\n\t\t\t\t\t\t\t\n
\n\t\n\t\n\t
\n\n\t\t
\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
\n\n\t
\n
\n
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t\t
\n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\n\t\t\t\t\t\t
\n\t\t\n\t\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\t\t
\n\t\t
Page semi-protected
\n\t\t
\n\n\t\t\t\t\t\t
From Wikipedia, the free encyclopedia
\n\t\t\t\t\t
\n\t\t\t\t\t
\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
\n\n

\n

\n\n\n\n\n\n\n\n

Artificial intelligence (AI), in its broadest sense, is intelligence emulated by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

\n\n

Goals

\n

The general problem of fully simulating (or creating) intelligence is mostly found to be overwhelming. However, some types of problems have been successfully broken into more achievable subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

\n

Reasoning and problem-solving

\n

Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

\n

Knowledge representation

\n
An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
\n

Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

\n

Planning and decision-making

\n

An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

\n

Learning

\n

Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

\n
\n

Natural language processing

\n

Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

\n

Perception

\n

Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61] object tracking,[62] and robotic perception.[63]\n

\n

Social intelligence

\n
Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
\n

Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

\n

General intelligence

\n

A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

\n

Techniques

\n

AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

\n

Search and optimization

\n

AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

\n
\n

State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

\n
\n
Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

\n

Logic

\n

Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

\n

Probabilistic methods for uncertain reasoning

\n
A simple Bayesian network, with the associated conditional probability tables
\n

Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

\n
Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
\n

Classifiers and statistical learning methods

\n

The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

\n

Artificial neural networks

\n
A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
\n

An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

\n
\n

Deep learning

\n
\n

Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

\n

GPT

\n

Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

\n

Hardware and software

\n\n

In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

\n

Applications

\n

AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

Health and medicine

\n\n

The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

\n

Games

\n\n

Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

\n

Mathematics

\n

In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

\n

Finance

\n

Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

\n

Military

\n\n

Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

\n

Generative AI

\n\n
Vincent van Gogh in watercolour created by generative AI software
\n

In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

\n

Agents

\n

Artificial intelligent (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

\n

Other industry-specific tasks

\n

There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

\n

Ethics

\n\n

AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

\n

Risks and harm

\n
\n\n

Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

AI developers argue that this is the only way to deliver valuable applications. and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

\n

Dominance by tech giants

\n

The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

\n

Substantial power needs and other environmental impacts

\n\n

In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

\n

Misinformation

\n\n

YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

\n

Algorithmic bias and fairness

\n\n

Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[217]\n

Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubiousdiscuss][220]\n

\n

Lack of transparency

\n\n

Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[222]\n

It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

People who have been harmed by an algorithm\'s decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

\n

Bad actors and weaponized AI

\n\n

Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

\n

Technological unemployment

\n\n

Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

\n

Existential risk

\n\n

It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[255]\n

Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

\n

Ethical machines and alignment

\n\n

Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

Other approaches include Wendell Wallach\'s "artificial moral agents"[273] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[274]\n

\n

Open source

\n

Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

\n

Frameworks

\n

Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute tests projects in four main areas:[282][283]\n

\n
  • Respect the dignity of individual people
  • \n
  • Connect with other people sincerely, openly, and inclusively
  • \n
  • Care for the wellbeing of everyone
  • \n
  • Protect social values, justice, and the public interest
\n

Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[285]\n

Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

\n

Regulation

\n\n
AI Safety Summit
The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
\n

The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

\n

History

\n\n\n

The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

\n

Philosophy

\n\n

Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

\n

Defining artificial intelligence

\n\n

Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

\n
The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
\n

Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

\n

Evaluating approaches to AI

\n

No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

\n

Symbolic AI and its limits

\n

Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

\n

Neat vs. scruffy

\n\n

"Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

\n

Soft vs. hard computing

\n\n

Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

\n

Narrow vs. general AI

\n\n

AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

\n

Machine consciousness, sentience, and mind

\n\n

The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

\n

Consciousness

\n\n

David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

\n

Computationalism and functionalism

\n\n

Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

\n

AI welfare and rights

\n

It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

\n

Future

\n

Superintelligence and the singularity

\n

A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

\n

Transhumanism

\n\n

Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

\n

In fiction

\n\n
The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
\n

Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

\n

See also

\n\n

Explanatory notes

\n
\n
    \n
  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  2. \n
  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
  4. \n
  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
  6. \n
  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
  8. \n
  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
  10. \n
  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
  12. \n
  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
  14. \n
  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
  16. \n
  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
  18. \n
  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
  20. \n
  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
  22. \n
  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
  24. \n
  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
  26. \n
  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
  28. \n
  29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
  30. \n
  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
  32. \n
  33. ^ Sometimes called a "robopocalypse"[252]\n
  34. \n
  35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
  36. \n
  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
  38. \n
  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
  40. \n
  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
  42. \n
  43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
  44. \n
  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
  46. \n
  47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
  48. \n
  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
  50. \n
  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
  52. \n
  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
  54. \n
  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
  56. \n
  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
  58. \n
\n

References

\n
\n
    \n
  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
  2. \n
  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
  4. \n
  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
  6. \n
  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
    Proposal for the modern version: Pennachin & Goertzel (2007)
    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
    \n
  8. \n
  9. ^ Russell & Norvig (2021, §1.2).\n
  10. \n
  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
    The proposal: McCarthy et al. (1955)
    \n
  12. \n
  13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
  14. \n
  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
  16. \n
  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
  18. \n
  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
  20. \n
  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
  22. \n
  23. ^ Toews (2023).\n
  24. \n
  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
  26. \n
  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
  28. \n
  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
  30. \n
  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
  32. \n
  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
  34. \n
  35. ^ Smoliar & Zhang (1994).\n
  36. \n
  37. ^ Neumann & Möller (2008).\n
  38. \n
  39. ^ Kuperman, Reichley & Bailey (2006).\n
  40. \n
  41. ^ McGarry (2005).\n
  42. \n
  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
  44. \n
  45. ^ Russell & Norvig (2021), pp. 272.\n
  46. \n
  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
  48. \n
  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
  50. \n
  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
  52. \n
  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
  54. \n
  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
  56. \n
  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
  58. \n
  59. ^ Newquist (1994), p. 296.\n
  60. \n
  61. ^ Crevier (1993), pp. 204–208.\n
  62. \n
  63. ^ Russell & Norvig (2021), p. 528.\n
  64. \n
  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
  66. \n
  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
  68. \n
  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
  70. \n
  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a online planning): Russell & Norvig (2021, Section 11.5).\n
  72. \n
  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
  74. \n
  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
  76. \n
  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
  78. \n
  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
  80. \n
  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
  82. \n
  83. ^ Turing (1950).\n
  84. \n
  85. ^ Solomonoff (1956).\n
  86. \n
  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
  88. \n
  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
  90. \n
  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
  92. \n
  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
  94. \n
  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
  96. \n
  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
  98. \n
  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
  100. \n
  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
  102. \n
  103. ^ Russell & Norvig (2021), pp. 856–858.\n
  104. \n
  105. ^ Dickson (2022).\n
  106. \n
  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
  108. \n
  109. ^ Vincent (2019).\n
  110. \n
  111. ^ Russell & Norvig (2021), pp. 875–878.\n
  112. \n
  113. ^ Bushwick (2023).\n
  114. \n
  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
  116. \n
  117. ^ Russell & Norvig (2021), pp. 849–850.\n
  118. \n
  119. ^ Russell & Norvig (2021), pp. 895–899.\n
  120. \n
  121. ^ Russell & Norvig (2021), pp. 899–901.\n
  122. \n
  123. ^ Challa et al. (2011).\n
  124. \n
  125. ^ Russell & Norvig (2021), pp. 931–938.\n
  126. \n
  127. ^ MIT AIL (2014).\n
  128. \n
  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
  130. \n
  131. ^ Waddell (2018).\n
  132. \n
  133. ^ Poria et al. (2017).\n
  134. \n
  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
  136. \n
  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
  138. \n
  139. ^ Russell & Norvig (2021), sect. 11.2.\n
  140. \n
  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
  142. \n
  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
  144. \n
  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
  146. \n
  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
  148. \n
  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
  150. \n
  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
  152. \n
  153. ^ Merkle & Middendorf (2013).\n
  154. \n
  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
  156. \n
  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
  158. \n
  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
  160. \n
  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
  162. \n
  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
  164. \n
  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
  166. \n
  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
  168. \n
  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
  170. \n
  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
  172. \n
  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
  174. \n
  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
  176. \n
  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
  178. \n
  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
  180. \n
  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
  182. \n
  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
  184. \n
  185. ^ Domingos (2015), chpt. 6.\n
  186. \n
  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
  188. \n
  189. ^ Domingos (2015), p. 210.\n
  190. \n
  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
  192. \n
  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
  194. \n
  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
  196. \n
  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
  198. \n
  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
  200. \n
  201. ^ Non-parametric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n
  202. \n
  203. ^ Domingos (2015), p. 152.\n
  204. \n
  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
  206. \n
  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
  208. \n
  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
  210. \n
  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
  212. \n
  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
  214. \n
  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
  216. \n
  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
  218. \n
  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
  220. \n
  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
  222. \n
  223. ^ Deng & Yu (2014), pp. 199–200.\n
  224. \n
  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
  226. \n
  227. ^ Russell & Norvig (2021), p. 751.\n
  228. \n
  229. ^ a b c Russell & Norvig (2021), p. 17.\n
  230. \n
  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
  232. \n
  233. ^ a b Schmidhuber (2022), sect. 5.\n
  234. \n
  235. ^ Schmidhuber (2022), sect. 6.\n
  236. \n
  237. ^ a b c Schmidhuber (2022), sect. 7.\n
  238. \n
  239. ^ Schmidhuber (2022), sect. 8.\n
  240. \n
  241. ^ Quoted in Christian (2020, p. 22)\n
  242. \n
  243. ^ Smith (2023).\n
  244. \n
  245. ^ "Explained: Generative AI". 9 November 2023.\n
  246. \n
  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
  248. \n
  249. ^ Marmouyet (2023).\n
  250. \n
  251. ^ Kobielus (2019).\n
  252. \n
  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
  254. \n
  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
  256. \n
  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see 'gigantic' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  258. \n
  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
  260. \n
  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
  262. \n
  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
  264. \n
  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
  266. \n
  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
  268. \n
  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
  270. \n
  271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  272. \n
  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
  274. \n
  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
  276. \n
  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
  278. \n
  279. ^ Markoff, John (16 February 2011). "Computer Wins on 'Jeopardy!': Trivial, It's Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
  280. \n
  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
  282. \n
  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
  284. \n
  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
  286. \n
  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in 'fiendishly complex' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
  288. \n
  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
  290. \n
  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
  292. \n
  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
  294. \n
  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
  296. \n
  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
  298. \n
  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
  300. \n
  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
  302. \n
  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
  304. \n
  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
  306. \n
  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
  308. \n
  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
  310. \n
  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
  312. \n
  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
  314. \n
  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
  316. \n
  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
  318. \n
  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
  320. \n
  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
  322. \n
  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
  324. \n
  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can't – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
  326. \n
  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
  328. \n
  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  330. \n
  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
  332. \n
  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
  334. \n
  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
  336. \n
  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
  338. \n
  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  340. \n
  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
  342. \n
  343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
  344. \n
  345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  346. \n
  347. ^ Simonite (2016).\n
  348. \n
  349. ^ Russell & Norvig (2021), p. 987.\n
  350. \n
  351. ^ Laskowski (2023).\n
  352. \n
  353. ^ GAO (2022).\n
  354. \n
  355. ^ Valinsky (2019).\n
  356. \n
  357. ^ Russell & Norvig (2021), p. 991.\n
  358. \n
  359. ^ Russell & Norvig (2021), pp. 991–992.\n
  360. \n
  361. ^ Christian (2020), p. 63.\n
  362. \n
  363. ^ Vincent (2022).\n
  364. \n
  365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
  366. \n
  367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
  368. \n
  369. ^ Reisner (2023).\n
  370. \n
  371. ^ Alter & Harris (2023).\n
  372. \n
  373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
  374. \n
  375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
  376. \n
  377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
  378. \n
  379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
  380. \n
  381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
  382. \n
  383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech's Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
  384. \n
  385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
  386. \n
  387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It's only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
  388. \n
  389. ^ Halper, Evan; O'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
  390. \n
  391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
  392. \n
  393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
  394. \n
  395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  396. \n
  397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
  398. \n
  399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  400. \n
  401. ^ Nicas (2018).\n
  402. \n
  403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
  404. \n
  405. ^ Williams (2023).\n
  406. \n
  407. ^ Taylor & Hern (2023).\n
  408. \n
  409. ^ a b Samuel, Sigal (19 April 2022). "Why it's so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
  410. \n
  411. ^ a b Rose (2023).\n
  412. \n
  413. ^ CNA (2019).\n
  414. \n
  415. ^ Goffrey (2008), p. 17.\n
  416. \n
  417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
  418. \n
  419. ^ Christian (2020), p. 25.\n
  420. \n
  421. ^ a b Russell & Norvig (2021), p. 995.\n
  422. \n
  423. ^ Grant & Hill (2023).\n
  424. \n
  425. ^ Larson & Angwin (2016).\n
  426. \n
  427. ^ Christian (2020), p. 67–70.\n
  428. \n
  429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
  430. \n
  431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
  432. \n
  433. ^ Quoted in Christian (2020, p. 65).\n
  434. \n
  435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
  436. \n
  437. ^ Quoted in Christian (2020, p. 80)\n
  438. \n
  439. ^ Dockrill (2022).\n
  440. \n
  441. ^ Sample (2017).\n
  442. \n
  443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
  444. \n
  445. ^ Christian (2020), p. 110.\n
  446. \n
  447. ^ Christian (2020), pp. 88–91.\n
  448. \n
  449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
  450. \n
  451. ^ Christian (2020), p. 91.\n
  452. \n
  453. ^ Christian (2020), p. 83.\n
  454. \n
  455. ^ Verma (2021).\n
  456. \n
  457. ^ Rothman (2020).\n
  458. \n
  459. ^ Christian (2020), pp. 105–108.\n
  460. \n
  461. ^ Christian (2020), pp. 108–112.\n
  462. \n
  463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI's 'Black Box'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
  464. \n
  465. ^ Russell & Norvig (2021), p. 989.\n
  466. \n
  467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
  468. \n
  469. ^ Russell & Norvig (2021), p. 988.\n
  470. \n
  471. ^ Robitzski (2018); Sainato (2015)\n
  472. \n
  473. ^ Harari (2018).\n
  474. \n
  475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
  476. \n
  477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
  478. \n
  479. ^ Urbina et al. (2022).\n
  480. \n
  481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
  482. \n
  483. ^ Ford & Colvin (2015); McGaughey (2022)\n
  484. \n
  485. ^ IGM Chicago (2017).\n
  486. \n
  487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
  488. \n
  489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
  490. \n
  491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
  492. \n
  493. ^ Carter, Justin (11 April 2023). "China's game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
  494. \n
  495. ^ Morgenstern (2015).\n
  496. \n
  497. ^ Mahdawi (2017); Thompson (2014)\n
  498. \n
  499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
  500. \n
  501. ^ Cellan-Jones (2014).\n
  502. \n
  503. ^ Russell & Norvig (2021), p. 1001.\n
  504. \n
  505. ^ Bostrom (2014).\n
  506. \n
  507. ^ Russell (2019).\n
  508. \n
  509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
  510. \n
  511. ^ Harari (2023).\n
  512. \n
  513. ^ Müller & Bostrom (2014).\n
  514. \n
  515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
  516. \n
  517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
  518. \n
  519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
  520. \n
  521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
  522. \n
  523. ^ Valance (2023).\n
  524. \n
  525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, 'father of AI' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
  526. \n
  527. ^ Colton, Emma (7 May 2023). "'Father of AI' says tech fears misplaced: 'You cannot stop it'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  528. \n
  529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned 'Father Of Modern AI,' Says His Life's Work Won't Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
  530. \n
  531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: 'Do we think the world is better off with more or less intelligence?'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
  532. \n
  533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
  534. \n
  535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
  536. \n
  537. ^ a b Christian (2020), pp. 67, 73.\n
  538. \n
  539. ^ Yudkowsky (2008).\n
  540. \n
  541. ^ a b Anderson & Anderson (2011).\n
  542. \n
  543. ^ AAAI (2014).\n
  544. \n
  545. ^ Wallach (2010).\n
  546. \n
  547. ^ Russell (2019), p. 173.\n
  548. \n
  549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
  550. \n
  551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
  552. \n
  553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
  554. \n
  555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
  556. \n
  557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  558. \n
  559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
  560. \n
  561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
  562. \n
  563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  564. \n
  565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
  566. \n
  567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
  568. \n
  569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  570. \n
  571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  572. \n
  573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
  574. \n
  575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
  576. \n\n
  577. ^ a b Vincent (2023).\n
  578. \n
  579. ^ Stanford University (2023).\n
  580. \n
  581. ^ a b c d UNESCO (2021).\n
  582. \n
  583. ^ Kissinger (2021).\n
  584. \n
  585. ^ Altman, Brockman & Sutskever (2023).\n
  586. \n
  587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
  588. \n
  589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
  590. \n
  591. ^ Edwards (2023).\n
  592. \n
  593. ^ Kasperowicz (2023).\n
  594. \n
  595. ^ Fox News (2023).\n
  596. \n
  597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
  598. \n
  599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
  600. \n
  601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
  602. \n
  603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
  604. \n
  605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
  606. \n
  607. ^ a b Russell & Norvig 2021, p. 9.\n
  608. \n
  609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
  610. \n
  611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
  612. \n
  613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
  614. \n
  615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
  616. \n
  617. ^ Crevier (1993), pp. 47–49.\n
  618. \n
  619. ^ Russell & Norvig (2003), p. 17.\n
  620. \n
  621. ^ Russell & Norvig (2003), p. 18.\n
  622. \n
  623. ^ Newquist (1994), pp. 86–86.\n
  624. \n
  625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
  626. \n
  627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
  628. \n
  629. ^ Russell & Norvig (2021), p. 21.\n
  630. \n
  631. ^ Lighthill (1973).\n
  632. \n
  633. ^ NRC 1999, pp. 212–213.\n
  634. \n
  635. ^ Russell & Norvig (2021), p. 22.\n
  636. \n
  637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
  638. \n
  639. ^ Russell & Norvig (2021), p. 24.\n
  640. \n
  641. ^ Nilsson (1998), p. 7.\n
  642. \n
  643. ^ McCorduck (2004), pp. 454–462.\n
  644. \n
  645. ^ Moravec (1988).\n
  646. \n
  647. ^ a b Brooks (1990).\n
  648. \n
  649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
  650. \n
  651. ^ Russell & Norvig (2021), p. 25.\n
  652. \n
  653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
  654. \n
  655. ^ Russell & Norvig (2021), p. 26.\n
  656. \n
  657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
  658. \n
  659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
  660. \n
  661. ^ Wong (2023).\n
  662. \n
  663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
  664. \n
  665. ^ a b c Clark (2015b).\n
  666. \n
  667. ^ Big data: Russell & Norvig (2021, p. 26)\n
  668. \n
  669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
  670. \n
  671. ^ DiFeliciantonio (2023).\n
  672. \n
  673. ^ Goswami (2023).\n
  674. \n
  675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
  676. \n
  677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
  678. \n
  679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
  680. \n
  681. ^ a b Turing (1950), p. 1.\n
  682. \n
  683. ^ Turing (1950), Under "The Argument from Consciousness".\n
  684. \n
  685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
  686. \n
  687. ^ Russell & Norvig (2021), p. 3.\n
  688. \n
  689. ^ Maker (2006).\n
  690. \n
  691. ^ McCarthy (1999).\n
  692. \n
  693. ^ Minsky (1986).\n
  694. \n
  695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
  696. \n
  697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
  698. \n
  699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
  700. \n
  701. ^ Nilsson (1983), p. 10.\n
  702. \n
  703. ^ Haugeland (1985), pp. 112–117.\n
  704. \n
  705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
  706. \n
  707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
  708. \n
  709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
  710. \n
  711. ^ Crevier (1993), p. 125.\n
  712. \n
  713. ^ Langley (2011).\n
  714. \n
  715. ^ Katz (2012).\n
  716. \n
  717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
  718. \n
  719. ^ Pennachin & Goertzel (2007).\n
  720. \n
  721. ^ a b Roberts (2016).\n
  722. \n
  723. ^ Russell & Norvig (2021), p. 986.\n
  724. \n
  725. ^ Chalmers (1995).\n
  726. \n
  727. ^ Dennett (1991).\n
  728. \n
  729. ^ Horst (2005).\n
  730. \n
  731. ^ Searle (1999).\n
  732. \n
  733. ^ Searle (1980), p. 1.\n
  734. \n
  735. ^ Russell & Norvig (2021), p. 9817.\n
  736. \n
  737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
  738. \n
  739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
  740. \n
  741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
  742. \n
  743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
  744. \n
  745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
  746. \n
  747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  748. \n
  749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
  750. \n
  751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
  752. \n
  753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
  754. \n
  755. ^ Russell & Norvig (2021), p. 1005.\n
  756. \n
  757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
  758. \n
  759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
  760. \n
  761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
  762. \n
  763. ^ McCorduck (2004), pp. 340–400.\n
  764. \n
  765. ^ Buttazzo (2001).\n
  766. \n
  767. ^ Anderson (2008).\n
  768. \n
  769. ^ McCauley (2007).\n
  770. \n
  771. ^ Galvan (1997).\n
  772. \n
\n

AI textbooks

\n

The two most widely used textbooks in 2023 (see the Open Syllabus):\n

\n\n

These were the four of the most widely used AI textbooks in 2008:\n

\n
\n\n
\n

Other textbooks:\n

\n\n

History of AI

\n
\n\n
\n

Other sources

\n
\n\n\n
\n

Further reading

\n
\n\n
\n
\n\n\n\n\n\n\n\n\n\n
\n
\n\t\t\t\t\t\n\t\t\t\t
\n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
\n\t
    \n
    \n\n\n'}'. -[2024-11-03 22:53:59,598] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) -[2024-11-03 22:53:59,686] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 22:53:59,688] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 22:54:01,339] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 22:54:01,341] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) - -[2024-11-03 22:54:01,342] [INFO] [Engine]: Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) - -[2024-11-03 22:54:01,344] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Agents' Results Summary: -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) - -[2024-11-03 22:54:01,755] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-03 22:54:01,756] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-03 22:54:01,758] [INFO] [Engine]: Engine simulation loop completed. -[2024-11-03 22:54:01,759] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 22:54:01,760] [INFO] [Evaluator]: Total Token Consumption: 12 -[2024-11-03 22:54:01,761] [INFO] [Evaluator]: Average Tokens per Iteration: 12.0 -[2024-11-03 22:54:01,762] [INFO] [Engine]: Simulation completed. -[2024-11-03 22:54:08,840] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:54:08,841] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:54:08,841] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:54:08,843] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:54:08,844] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:54:08,845] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:54:08,846] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:54:08,847] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:54:08,848] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 22:54:08,851] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 22:54:08,851] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:54:08,852] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:54:08,853] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:54:08,854] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 22:54:08,857] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:54:08,857] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:54:08,858] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:54:08,859] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:54:08,861] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:54:09,866] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:54:09,867] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:54:09,868] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 22:54:09,871] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:54:23,132] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 22:54:23,267] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 22:54:23,296] [ERROR] [Engine]: Error while executing task for agent 'agent2': Function marble.llms.model_prompting.model_prompting() parameter tool_choice={'type': 'function', 'function': {'name': 'communicate_to'}} violates type hint typing.Optional[str], as dict {'type': 'function', 'function': {'name': 'communicate_to'}} not  or str. -[2024-11-03 22:54:23,297] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. 
-[2024-11-03 22:54:23,298] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 22:55:45,722] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 22:55:45,723] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 22:55:45,770] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 22:55:48,454] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 22:55:48,456] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 22:55:48,457] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-03 22:55:48,458] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-03 22:55:48,459] [ERROR] [Engine]: An error occurred during simulation. -Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 22:55:48,467] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 22:55:48,468] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 22:55:48,469] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 
22:55:48,470] [INFO] [Engine]: Simulation completed. -[2024-11-03 22:55:55,897] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:55:55,899] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:55:55,899] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:55:55,900] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:55:55,901] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:55:55,902] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:55:55,903] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:55:55,904] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:55:55,905] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 22:55:56,003] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 22:55:56,003] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:55:56,004] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:55:56,006] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:55:56,007] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 22:55:56,010] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:55:56,010] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:55:56,011] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:55:56,012] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:55:56,013] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:55:57,232] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:55:57,233] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:55:57,235] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. 
-[2024-11-03 22:55:57,236] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:57:07,037] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 22:57:07,038] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 22:57:23,921] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-03 22:57:23,929] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 22:57:23,930] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 22:57:25,229] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 22:57:25,230] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 22:57:25,232] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 22:57:27,876] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 22:57:27,905] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 22:57:27,906] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-03 22:57:27,907] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-03 22:57:27,908] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 22:57:27,912] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 22:57:27,913] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 22:57:27,914] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 22:57:27,915] [INFO] [Engine]: Simulation completed. -[2024-11-03 22:57:47,306] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:57:47,347] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:57:47,347] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:57:47,348] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:57:47,349] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:57:47,350] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:57:47,351] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:57:47,353] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:57:47,354] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 22:57:47,358] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 22:57:47,358] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:57:47,359] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:57:47,360] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:57:47,361] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 22:57:47,366] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:57:47,366] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:57:47,367] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:57:47,368] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:57:47,369] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:57:48,328] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:57:48,329] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:57:48,331] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 22:57:48,332] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:57:53,725] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 22:57:53,727] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 22:57:59,484] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. -[2024-11-03 22:57:59,486] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 228cacdd-2675-4921-95a1-b84ac24eac29', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google.'}'. 
-[2024-11-03 22:57:59,487] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_9tHqnOjGIqgLUxqxDlOSdh4x', type='function')], function_call=None) -[2024-11-03 22:57:59,488] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 22:57:59,489] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 22:59:18,981] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 22:59:18,982] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 22:59:19,089] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 22:59:20,691] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 22:59:20,693] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_9tHqnOjGIqgLUxqxDlOSdh4x', type='function')], function_call=None) - -[2024-11-03 22:59:20,694] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_9tHqnOjGIqgLUxqxDlOSdh4x', type='function')], function_call=None) - -[2024-11-03 22:59:20,695] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_9tHqnOjGIqgLUxqxDlOSdh4x', type='function')], function_call=None) - -[2024-11-03 22:59:20,824] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 22:59:20,828] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 22:59:20,830] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 22:59:20,831] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 22:59:20,832] [INFO] [Engine]: Simulation completed. -[2024-11-03 22:59:27,261] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 22:59:27,317] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 22:59:27,317] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 22:59:27,318] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 22:59:27,319] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 22:59:27,320] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 22:59:27,322] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 22:59:27,323] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 22:59:27,324] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 22:59:27,327] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 22:59:27,327] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 22:59:27,328] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 22:59:27,329] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 22:59:27,330] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 22:59:27,333] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 22:59:27,334] [INFO] [Engine]: Engine initialized. -[2024-11-03 22:59:27,335] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 22:59:27,336] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 22:59:27,337] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 22:59:28,527] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 22:59:28,528] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 22:59:28,529] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 22:59:28,530] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 22:59:32,053] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 22:59:32,055] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 22:59:40,534] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. -[2024-11-03 22:59:40,536] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session abeab23a-0d53-4c64-9dad-18ff9602bfb5', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google.'}'. 
-[2024-11-03 22:59:40,537] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_4OfDmmwPKVsEOAJPYuY6Y39P', type='function')], function_call=None) -[2024-11-03 22:59:40,538] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 22:59:40,539] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:00:47,517] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:00:47,520] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:00:47,614] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 23:00:49,412] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:00:49,413] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_4OfDmmwPKVsEOAJPYuY6Y39P', type='function')], function_call=None) - -[2024-11-03 23:00:49,414] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_4OfDmmwPKVsEOAJPYuY6Y39P', type='function')], function_call=None) - -[2024-11-03 23:00:49,416] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_4OfDmmwPKVsEOAJPYuY6Y39P', type='function')], function_call=None) - -[2024-11-03 23:00:49,468] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:00:49,478] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:00:49,479] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:00:49,480] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:00:49,482] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:01:14,318] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:01:14,330] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:01:14,330] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:01:14,331] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:01:14,332] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:01:14,333] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:01:14,335] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:01:14,336] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:01:14,337] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:01:14,340] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:01:14,340] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:01:14,341] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:01:14,342] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:01:14,343] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:01:14,347] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:01:14,347] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:01:14,348] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:01:14,349] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:01:14,350] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:01:15,634] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:01:15,635] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:01:15,636] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:01:15,637] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:01:26,625] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:01:26,626] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:02:58,681] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. -[2024-11-03 23:02:58,682] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 7477b6b8-f0eb-4fe8-a32b-766aaedd6bc5', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google.'}'. 
-[2024-11-03 23:02:58,772] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_lcuGV3qP6QDGEmfVEW9du0Sy', type='function')], function_call=None) -[2024-11-03 23:02:58,774] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:02:58,775] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:03:01,820] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:03:01,821] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:03:01,822] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 23:03:03,582] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:03:03,583] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_lcuGV3qP6QDGEmfVEW9du0Sy', type='function')], function_call=None) - -[2024-11-03 23:03:03,585] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_lcuGV3qP6QDGEmfVEW9du0Sy', type='function')], function_call=None) - -[2024-11-03 23:03:03,586] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_lcuGV3qP6QDGEmfVEW9du0Sy', type='function')], function_call=None) - -[2024-11-03 23:03:03,613] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:03:03,619] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:03:03,620] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:03:03,622] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:03:03,623] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:03:10,924] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:03:10,926] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:03:10,926] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:03:10,927] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:03:10,929] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:03:10,930] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:03:10,931] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:03:10,932] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:03:10,933] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:03:10,936] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:03:10,936] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:03:10,937] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:03:10,938] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:03:10,940] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:03:10,943] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:03:10,943] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:03:10,946] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:03:10,948] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:03:10,949] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:03:11,997] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:03:11,998] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:03:11,999] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:03:12,000] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:04:45,189] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:04:45,191] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:07:50,059] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-03 23:07:50,062] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:07:50,079] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:07:52,603] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:07:52,605] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. 
-[2024-11-03 23:07:52,606] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 23:07:54,373] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:07:54,375] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 23:07:54,376] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-03 23:07:54,377] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-03 23:07:54,378] [ERROR] [Engine]: An error occurred during simulation. -Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:07:54,386] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:07:54,387] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:07:54,388] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:07:54,389] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:08:02,010] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:08:02,012] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:08:02,012] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. 
-[2024-11-03 23:08:02,014] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:08:02,015] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:08:02,016] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:08:02,017] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:08:02,018] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:08:02,019] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:08:02,021] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 23:08:02,022] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:08:02,023] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:08:02,024] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:08:02,025] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:08:02,028] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:08:02,028] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:08:02,029] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:08:02,030] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:08:02,031] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. 
-- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:08:03,187] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:08:03,188] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:08:03,189] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:08:03,190] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:09:01,658] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? 
-[2024-11-03 23:09:01,661] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:09:04,666] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Please search for the latest trends in AI using Google. -[2024-11-03 23:09:04,667] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Please search for the latest trends in AI using Google. -[2024-11-03 23:09:05,304] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. -[2024-11-03 23:09:05,312] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 9332f225-218e-4093-b1ef-ab6092765883', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google.'}'. -[2024-11-03 23:09:05,313] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_G2WmEMrT35odzfqEppcooun6', type='function')], function_call=None) -[2024-11-03 23:09:05,314] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:09:05,315] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:10:08,027] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:10:08,028] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:10:08,039] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 23:12:35,499] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:12:35,500] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_G2WmEMrT35odzfqEppcooun6', type='function')], function_call=None) - -[2024-11-03 23:12:35,533] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_G2WmEMrT35odzfqEppcooun6', type='function')], function_call=None) - -[2024-11-03 23:12:35,535] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_G2WmEMrT35odzfqEppcooun6', type='function')], function_call=None) - -[2024-11-03 23:12:35,536] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:12:35,544] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:12:35,569] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:12:35,570] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:12:35,571] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:12:49,416] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:12:49,433] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:12:49,433] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:12:49,434] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:12:49,435] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:12:49,436] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:12:49,437] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:12:49,438] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:12:49,439] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:12:49,442] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:12:49,442] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:12:49,443] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:12:49,444] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:12:49,445] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:12:49,448] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:12:49,448] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:12:49,449] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:12:49,450] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:12:49,452] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:12:50,672] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:12:50,673] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:12:50,674] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:12:50,676] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:12:59,038] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:12:59,058] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:13:05,612] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: For task Search for the latest trends in AI using Google. -[2024-11-03 23:13:05,613] [INFO] [BaseAgent]: Agent agent2 received message from agent1: For task Search for the latest trends in AI using Google. -[2024-11-03 23:13:06,163] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. 
-[2024-11-03 23:13:06,166] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 1b766d7c-9a6f-4f23-b37e-c3e8f33c90b5', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google.'}'. -[2024-11-03 23:13:06,168] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) -[2024-11-03 23:13:06,170] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:13:06,171] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:14:01,075] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:14:01,076] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:14:01,092] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 23:14:02,397] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:14:02,399] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) - -[2024-11-03 23:14:02,400] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) - -[2024-11-03 23:14:02,401] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_10DTB4ep9CYqXaoOJQUHDw8w', type='function')], function_call=None) - -[2024-11-03 23:14:02,403] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:14:02,406] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:14:02,425] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:14:02,427] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:14:02,428] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:14:08,915] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:14:08,918] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:14:08,918] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:14:08,919] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:14:08,920] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:14:08,921] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:14:08,922] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:14:08,924] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:14:08,925] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:14:08,928] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:14:08,928] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:14:08,929] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:14:08,930] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:14:08,931] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:14:08,935] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:14:08,935] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:14:08,936] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:14:08,937] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:14:08,938] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:14:10,026] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:14:10,028] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:14:10,042] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:14:10,048] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:14:12,820] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-03 23:14:12,821] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-03 23:14:23,285] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: For task Search for the latest trends in AI using Google. -[2024-11-03 23:14:23,290] [INFO] [BaseAgent]: Agent agent2 received message from agent1: For task Search for the latest trends in AI using Google. 
-[2024-11-03 23:14:23,825] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google and provide the information?'}'. -[2024-11-03 23:14:23,826] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session a6b2048f-f86d-491a-967d-8905c19f27dd', 'session_id': 'Agent2 asked Agent1 to search for the latest trends in AI using Google and provide the information.'}'. -[2024-11-03 23:14:23,827] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_sJNrbNSzyUnzmrmQbP4Ttc0W', type='function')], function_call=None) -[2024-11-03 23:14:23,828] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:14:23,829] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:14:35,387] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-03 23:14:35,395] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
    \n\t
    \n\t\t
    \n\t\t\t
    \n\n\t\t\n\t\t\t\n\n\n\t\t
    \n\t\t
    \n\t\t\t\n\n\n\t\t\t\n\n\t\t
    \n\t\n\n
    \n\t
    \n\t\t
    \n\t\t\t
    \n\t\t
    \n\t\t
    \n\t\t\t
    \n\t\t
    \n\t\t\t\n\t\t
    \n\t
    \n\t
    \n\t\t\t\t
    \n\t\t\n\t\t\t
    \n\t\t
    \n\t\t
    \n\t\t\t
    \n\t\t\t\t
    \n\t\t\t\t\t\n\t\t\t\t\t

    Artificial intelligence

    \n\t\t\t\t\t\t\t\n
    \n\t\n\t\n\t
    \n\n\t\t
    \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
    \n\n\t
    \n
    \n
    \n\t\t\t\t
    \n\t\t\t\t\t
    \n\t\t\t\t\t\t
    \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
    \n\t\t\t\t\t\t
    \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
    \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
    \n\t\t\t\t\t
    \n\t\t\t\t
    \n\t\t\t\t
    \n\t\t\t\t\t
    \n\t\t\t\t\t\t\n\t\t\t\t\t\t
    \n\t\t\n\t\t\t\t\t
    \n\t\t\t\t
    \n\t\t\t\t
    \n\t\t\t\t\t
    \n\t\t\t\t\t\t\t
    \n\t\t
    Page semi-protected
    \n\t\t
    \n\n\t\t\t\t\t\t
    From Wikipedia, the free encyclopedia
    \n\t\t\t\t\t
    \n\t\t\t\t\t
    \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
    \n\n

    \n

    \n\n\n\n\n\n\n\n

    Artificial intelligence (AI), in its broadest sense, is intelligence emulated by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

    Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

    The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

    Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

    \n\n

    Goals

    \n

    The general problem of fully simulating (or creating) intelligence is mostly found to be overwhelming. However, some types of problems have been successfully broken into more achievable subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

    \n

    Reasoning and problem-solving

    \n

    Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

    Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

    \n

    Knowledge representation

    \n
    An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
    \n

    Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

    A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

    Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

    \n

    Planning and decision-making

    \n

    An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

    In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

    In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

    A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

    Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

    \n

    Learning

    \n

    Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

    There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

    In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

    Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

    \n
    \n

    Natural language processing

    \n

    Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

    Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

    Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

    \n

    Perception

    \n

    Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

    The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

    \n

    Social intelligence

    \n
    Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
    \n

    Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

    However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

    \n

    General intelligence

    \n

    A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

    \n

    Techniques

    \n

    AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

    \n

    Search and optimization

    \n

    AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

    \n
    \n

    State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

    Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

    Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

    \n
    \n
    Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

    Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

    Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

    Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

    Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

    \n

    Logic

    \n

    Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

    Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

    Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

    Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

    Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

    Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

    \n

    Probabilistic methods for uncertain reasoning

    \n
    A simple Bayesian network, with the associated conditional probability tables
    \n

    Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

    Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

    Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

    \n
    Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
    \n

    Classifiers and statistical learning methods

    \n

    The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

    There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

    \n

    Artificial neural networks

    \n
    A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
    \n

    An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

    Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

    In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

    \n
    \n

    Deep learning

    \n
    \n

    Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

    Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

    \n

    GPT

    \n

    Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

    Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

    \n

    Hardware and software

    \n\n

    In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

    The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

    \n

    Applications

    \n

    AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

    Health and medicine

    \n\n

    The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

    For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

    \n

    Games

    \n\n

    Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

    \n

    Mathematics

    \n

    In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

    Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

    When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

    Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

    \n

    Finance

    \n

    Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

    World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

    \n

    Military

    \n\n

    Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

    In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

    \n

    Generative AI

    \n\n
    Vincent van Gogh in watercolour created by generative AI software
    \n

    In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

    In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

    \n

    Agents

    \n

    Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

    \n

    Other industry-specific tasks

    \n

    There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

    AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

    In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

    Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

    During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

    \n

    Ethics

    \n\n

    AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

    \n

    Risks and harm

    \n
    \n\n

    Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

    AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

    Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

    AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

    Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

    \n

    Dominance by tech giants

    \n

    The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

    \n

    Substantial power needs and other environmental impacts

    \n\n

    In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

    Prodigious power consumption by AI is responsible for the growth of fossil fuel use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

    A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

    In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

    In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

    \n

    Misinformation

    \n\n

    YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

    In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

    \n

    Algorithmic bias and fairness

    \n\n

    Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

    On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

    COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

    A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[217]\n

    Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

    Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

    There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

    At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][220]\n

    \n

    Lack of transparency

    \n\n

    Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[222]\n

    It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

    People who have been harmed by an algorithm\'s decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

    DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

    Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

    \n

    Bad actors and weaponized AI

    \n\n

    Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

    A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

    AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

    There are many other ways in which AI is expected to help bad actors, some of which can not be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

    \n

    Technological unemployment

    \n\n

    Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

    In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

    Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

    From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

    \n

    Existential risk

    \n\n

    It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

    First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[255]\n

    Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

    The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

    In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

    In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

    Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

    \n

    Ethical machines and alignment

    \n\n

    Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

    Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

    Other approaches include Wendell Wallach\'s "artificial moral agents"[273] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[274]\n

    \n

    Open source

    \n

    Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

    \n

    Frameworks

    \n

    Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[282][283]\n

    \n
    • Respect the dignity of individual people
    • \n
    • Connect with other people sincerely, openly, and inclusively
    • \n
    • Care for the wellbeing of everyone
    • \n
    • Protect social values, justice, and the public interest
    \n

    Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[285]\n

    Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

    The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under an MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

    \n

    Regulation

    \n\n
    AI Safety Summit
    The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
    \n

    The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

    In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

    In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

    \n

    History

    \n\n\n

    The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

    The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

    Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

    In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

    Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

    AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

    Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

    In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

    In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

    \n

    Philosophy

    \n\n

    Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

    \n

    Defining artificial intelligence

    \n\n

    Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

    \n
    The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
    \n

    Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

    McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

    Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

    Some authors have suggested that, in practice, the definition of AI is vague and difficult to pin down, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

    \n

    Evaluating approaches to AI

    \n

    No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

    \n

    Symbolic AI and its limits

    \n

    Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

    However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

    The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

    \n

    Neat vs. scruffy

    \n\n

    "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

    \n

    Soft vs. hard computing

    \n\n

    Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

    \n

    Narrow vs. general AI

    \n\n

    AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

    \n

    Machine consciousness, sentience, and mind

    \n\n

    The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

    \n

    Consciousness

    \n\n

    David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

    \n

    Computationalism and functionalism

    \n\n

    Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

    Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

    \n

    AI welfare and rights

    \n

    It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

    In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

    Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

    \n

    Future

    \n

    Superintelligence and the singularity

    \n

    A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

    However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

    \n

    Transhumanism

    \n\n

    Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

    Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

    \n

    In fiction

    \n\n
    The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
    \n

    Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

    A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

    Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

    Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

    \n

    See also

    \n\n

    Explanatory notes

    \n
    \n
      \n
    1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
    2. \n
    3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
    4. \n
    5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
    6. \n
    7. ^ \n"Rational agent" is general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
    8. \n
    9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
    10. \n
    11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
    12. \n
    13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
    14. \n
    15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
    16. \n
    17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
    18. \n
    19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
    20. \n
    21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
    22. \n
    23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
    24. \n
    25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
    26. \n
    27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
    28. \n
    29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
    30. \n
    31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
    32. \n
    33. ^ Sometimes called a "robopocalypse"[252]\n
    34. \n
    35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
    36. \n
    37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
    38. \n
    39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
    40. \n
    41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
    42. \n
    43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
    44. \n
    45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
    46. \n
    47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
    48. \n
    49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
    50. \n
    51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
    52. \n
    53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
    54. \n
    55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
    56. \n
    57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
    58. \n
    \n

    References

    \n
    \n
      \n
    1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
    2. \n
    3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
    4. \n
    5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
    6. \n
    7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
      Proposal for the modern version: Pennachin & Goertzel (2007)
      Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
      \n
    8. \n
    9. ^ Russell & Norvig (2021, §1.2).\n
    10. \n
    11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
      The proposal: McCarthy et al. (1955)
      \n
    12. \n
    13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
    14. \n
    15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
    16. \n
    17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
    18. \n
    19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
    20. \n
    21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
    22. \n
    23. ^ Toews (2023).\n
    24. \n
    25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
    26. \n
    27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
    28. \n
    29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
    30. \n
    31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
    32. \n
    33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
    34. \n
    35. ^ Smoliar & Zhang (1994).\n
    36. \n
    37. ^ Neumann & Möller (2008).\n
    38. \n
    39. ^ Kuperman, Reichley & Bailey (2006).\n
    40. \n
    41. ^ McGarry (2005).\n
    42. \n
    43. ^ Bertini, Del Bimbo & Torniai (2006).\n
    44. \n
    45. ^ Russell & Norvig (2021), pp. 272.\n
    46. \n
    47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
    48. \n
    49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
    50. \n
    51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
    52. \n
    53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
    54. \n
    55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
    56. \n
    57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
    58. \n
    59. ^ Newquist (1994), p. 296.\n
    60. \n
    61. ^ Crevier (1993), pp. 204–208.\n
    62. \n
    63. ^ Russell & Norvig (2021), p. 528.\n
    64. \n
    65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
    66. \n
    67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
    68. \n
    69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
    70. \n
    71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
    72. \n
    73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
    74. \n
    75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
    76. \n
    77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
    78. \n
    79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
    80. \n
    81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
    82. \n
    83. ^ Turing (1950).\n
    84. \n
    85. ^ Solomonoff (1956).\n
    86. \n
    87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
    88. \n
    89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
    90. \n
    91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
    92. \n
    93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
    94. \n
    95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
    96. \n
    97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
    98. \n
    99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
    100. \n
    101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
    102. \n
    103. ^ Russell & Norvig (2021), pp. 856–858.\n
    104. \n
    105. ^ Dickson (2022).\n
    106. \n
    107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
    108. \n
    109. ^ Vincent (2019).\n
    110. \n
    111. ^ Russell & Norvig (2021), pp. 875–878.\n
    112. \n
    113. ^ Bushwick (2023).\n
    114. \n
    115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
    116. \n
    117. ^ Russell & Norvig (2021), pp. 849–850.\n
    118. \n
    119. ^ Russell & Norvig (2021), pp. 895–899.\n
    120. \n
    121. ^ Russell & Norvig (2021), pp. 899–901.\n
    122. \n
    123. ^ Challa et al. (2011).\n
    124. \n
    125. ^ Russell & Norvig (2021), pp. 931–938.\n
    126. \n
    127. ^ MIT AIL (2014).\n
    128. \n
    129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
    130. \n
    131. ^ Waddell (2018).\n
    132. \n
    133. ^ Poria et al. (2017).\n
    134. \n
    135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
    136. \n
    137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
    138. \n
    139. ^ Russell & Norvig (2021), sect. 11.2.\n
    140. \n
    141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
    142. \n
    143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
    144. \n
    145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
    146. \n
    147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
    148. \n
    149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
    150. \n
    151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
    152. \n
    153. ^ Merkle & Middendorf (2013).\n
    154. \n
    155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
    156. \n
    157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
    158. \n
    159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
    160. \n
    161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
    162. \n
    163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
    164. \n
    165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
    166. \n
    167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
    168. \n
    169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
    170. \n
    171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
    172. \n
    173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
    174. \n
    175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
    176. \n
    177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
    178. \n
    179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
    180. \n
    181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
    182. \n
    183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
    184. \n
    185. ^ Domingos (2015), chpt. 6.\n
    186. \n
    187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
    188. \n
    189. ^ Domingos (2015), p. 210.\n
    190. \n
    191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
    192. \n
    193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
    194. \n
    195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
    196. \n
    197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
    198. \n
    199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
    200. \n
    201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
    202. \n
    203. ^ Domingos (2015), p. 152.\n
    204. \n
    205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
    206. \n
    207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
    208. \n
    209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
    210. \n
    211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
    212. \n
    213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
    214. \n
    215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
    216. \n
    217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
    218. \n
    219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
    220. \n
    221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
    222. \n
    223. ^ Deng & Yu (2014), pp. 199–200.\n
    224. \n
    225. ^ Ciresan, Meier & Schmidhuber (2012).\n
    226. \n
    227. ^ Russell & Norvig (2021), p. 751.\n
    228. \n
    229. ^ a b c Russell & Norvig (2021), p. 17.\n
    230. \n
    231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
    232. \n
    233. ^ a b Schmidhuber (2022), sect. 5.\n
    234. \n
    235. ^ Schmidhuber (2022), sect. 6.\n
    236. \n
    237. ^ a b c Schmidhuber (2022), sect. 7.\n
    238. \n
    239. ^ Schmidhuber (2022), sect. 8.\n
    240. \n
    241. ^ Quoted in Christian (2020, p. 22)\n
    242. \n
    243. ^ Smith (2023).\n
    244. \n
    245. ^ "Explained: Generative AI". 9 November 2023.\n
    246. \n
    247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
    248. \n
    249. ^ Marmouyet (2023).\n
    250. \n
    251. ^ Kobielus (2019).\n
    252. \n
    253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
    254. \n
    255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
    256. \n
    257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
    258. \n
    259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
    260. \n
    261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
    262. \n
    263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
    264. \n
    265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
    266. \n
    267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
    268. \n
    269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
    270. \n
    271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    272. \n
    273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
    274. \n
    275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
    276. \n
    277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
    278. \n
    279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
    280. \n
    281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
    282. \n
    283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
    284. \n
    285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
    286. \n
    287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
    288. \n
    289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
    290. \n
    291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
    292. \n
    293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
    294. \n
    295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
    296. \n
    297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
    298. \n
    299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
    300. \n
    301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
    302. \n
    303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
    304. \n
    305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
    306. \n
    307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
    308. \n
    309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
    310. \n
    311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
    312. \n
    313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
    314. \n
    315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
    316. \n
    317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
    318. \n
    319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
    320. \n
    321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
    322. \n
    323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
    324. \n
    325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
    326. \n
    327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
    328. \n
    329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    330. \n
    331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
    332. \n
    333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
    334. \n
    335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
    336. \n
    337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
    338. \n
    339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    340. \n
    341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
    342. \n
    343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
    344. \n
    345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    346. \n
    347. ^ Simonite (2016).\n
    348. \n
    349. ^ Russell & Norvig (2021), p. 987.\n
    350. \n
    351. ^ Laskowski (2023).\n
    352. \n
    353. ^ GAO (2022).\n
    354. \n
    355. ^ Valinsky (2019).\n
    356. \n
    357. ^ Russell & Norvig (2021), p. 991.\n
    358. \n
    359. ^ Russell & Norvig (2021), pp. 991–992.\n
    360. \n
    361. ^ Christian (2020), p. 63.\n
    362. \n
    363. ^ Vincent (2022).\n
    364. \n
    365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
    366. \n
    367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
    368. \n
    369. ^ Reisner (2023).\n
    370. \n
    371. ^ Alter & Harris (2023).\n
    372. \n
    373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
    374. \n
    375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
    376. \n
    377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
    378. \n
    379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
    380. \n
    381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
    382. \n
    383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
    384. \n
    385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
    386. \n
    387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
    388. \n
    389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
    390. \n
    391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
    392. \n
    393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
    394. \n
    395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    396. \n
    397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
    398. \n
    399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    400. \n
    401. ^ Nicas (2018).\n
    402. \n
    403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
    404. \n
    405. ^ Williams (2023).\n
    406. \n
    407. ^ Taylor & Hern (2023).\n
    408. \n
    409. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
    410. \n
    411. ^ a b Rose (2023).\n
    412. \n
    413. ^ CNA (2019).\n
    414. \n
    415. ^ Goffrey (2008), p. 17.\n
    416. \n
    417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
    418. \n
    419. ^ Christian (2020), p. 25.\n
    420. \n
    421. ^ a b Russell & Norvig (2021), p. 995.\n
    422. \n
    423. ^ Grant & Hill (2023).\n
    424. \n
    425. ^ Larson & Angwin (2016).\n
    426. \n
    427. ^ Christian (2020), pp. 67–70.\n
    428. \n
    429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
    430. \n
    431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
    432. \n
    433. ^ Quoted in Christian (2020, p. 65).\n
    434. \n
    435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
    436. \n
    437. ^ Quoted in Christian (2020, p. 80)\n
    438. \n
    439. ^ Dockrill (2022).\n
    440. \n
    441. ^ Sample (2017).\n
    442. \n
    443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
    444. \n
    445. ^ Christian (2020), p. 110.\n
    446. \n
    447. ^ Christian (2020), pp. 88–91.\n
    448. \n
    449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
    450. \n
    451. ^ Christian (2020), p. 91.\n
    452. \n
    453. ^ Christian (2020), p. 83.\n
    454. \n
    455. ^ Verma (2021).\n
    456. \n
    457. ^ Rothman (2020).\n
    458. \n
    459. ^ Christian (2020), pp. 105–108.\n
    460. \n
    461. ^ Christian (2020), pp. 108–112.\n
    462. \n
    463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
    464. \n
    465. ^ Russell & Norvig (2021), p. 989.\n
    466. \n
    467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
    468. \n
    469. ^ Russell & Norvig (2021), p. 988.\n
    470. \n
    471. ^ Robitzski (2018); Sainato (2015)\n
    472. \n
    473. ^ Harari (2018).\n
    474. \n
    475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
    476. \n
    477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
    478. \n
    479. ^ Urbina et al. (2022).\n
    480. \n
    481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
    482. \n
    483. ^ Ford & Colvin (2015); McGaughey (2022)\n
    484. \n
    485. ^ IGM Chicago (2017).\n
    486. \n
    487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
    488. \n
    489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
    490. \n
    491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
    492. \n
    493. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
    494. \n
    495. ^ Morgenstern (2015).\n
    496. \n
    497. ^ Mahdawi (2017); Thompson (2014)\n
    498. \n
    499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
    500. \n
    501. ^ Cellan-Jones (2014).\n
    502. \n
    503. ^ Russell & Norvig (2021), p. 1001.\n
    504. \n
    505. ^ Bostrom (2014).\n
    506. \n
    507. ^ Russell (2019).\n
    508. \n
    509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
    510. \n
    511. ^ Harari (2023).\n
    512. \n
    513. ^ Müller & Bostrom (2014).\n
    514. \n
    515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
    516. \n
    517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
    518. \n
    519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
    520. \n
    521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
    522. \n
    523. ^ Valance (2023).\n
    524. \n
    525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
    526. \n
    527. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
    528. \n
    529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
    530. \n
    531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
    532. \n
    533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
    534. \n
    535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
    536. \n
    537. ^ a b Christian (2020), pp. 67, 73.\n
    538. \n
    539. ^ Yudkowsky (2008).\n
    540. \n
    541. ^ a b Anderson & Anderson (2011).\n
    542. \n
    543. ^ AAAI (2014).\n
    544. \n
    545. ^ Wallach (2010).\n
    546. \n
    547. ^ Russell (2019), p. 173.\n
    548. \n
    549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
    550. \n
    551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
    552. \n
    553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
    554. \n
    555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
    556. \n
    557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
    558. \n
    559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
    560. \n
    561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
    562. \n
    563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
    564. \n
    565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
    566. \n
    567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
    568. \n
    569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    570. \n
    571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    572. \n
    573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
    574. \n
    575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
    576. \n
    577. ^ a b Vincent (2023).\n
    578. \n
    579. ^ Stanford University (2023).\n
    580. \n
    581. ^ a b c d UNESCO (2021).\n
    582. \n
    583. ^ Kissinger (2021).\n
    584. \n
    585. ^ Altman, Brockman & Sutskever (2023).\n
    586. \n
    587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
    588. \n
    589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
    590. \n
    591. ^ Edwards (2023).\n
    592. \n
    593. ^ Kasperowicz (2023).\n
    594. \n
    595. ^ Fox News (2023).\n
    596. \n
    597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
    598. \n
    599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
    600. \n
    601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
    602. \n
    603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
    604. \n
    605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
    606. \n
    607. ^ a b Russell & Norvig (2021), p. 9.\n
    608. \n
    609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
    610. \n
    611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
    612. \n
    613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
    614. \n
    615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
    616. \n
    617. ^ Crevier (1993), pp. 47–49.\n
    618. \n
    619. ^ Russell & Norvig (2003), p. 17.\n
    620. \n
    621. ^ Russell & Norvig (2003), p. 18.\n
    622. \n
    623. ^ Newquist (1994), p. 86.\n
    624. \n
    625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
    626. \n
    627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
    628. \n
    629. ^ Russell & Norvig (2021), p. 21.\n
    630. \n
    631. ^ Lighthill (1973).\n
    632. \n
    633. ^ NRC 1999, pp. 212–213.\n
    634. \n
    635. ^ Russell & Norvig (2021), p. 22.\n
    636. \n
    637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
    638. \n
    639. ^ Russell & Norvig (2021), p. 24.\n
    640. \n
    641. ^ Nilsson (1998), p. 7.\n
    642. \n
    643. ^ McCorduck (2004), pp. 454–462.\n
    644. \n
    645. ^ Moravec (1988).\n
    646. \n
    647. ^ a b Brooks (1990).\n
    648. \n
    649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
    650. \n
    651. ^ Russell & Norvig (2021), p. 25.\n
    652. \n
    653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
    654. \n
    655. ^ Russell & Norvig (2021), p. 26.\n
    656. \n
    657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
    658. \n
    659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
    660. \n
    661. ^ Wong (2023).\n
    662. \n
    663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
    664. \n
    665. ^ a b c Clark (2015b).\n
    666. \n
    667. ^ Big data: Russell & Norvig (2021, p. 26)\n
    668. \n
    669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
    670. \n
    671. ^ DiFeliciantonio (2023).\n
    672. \n
    673. ^ Goswami (2023).\n
    674. \n
    675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
    676. \n
    677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
    678. \n
    679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
    680. \n
    681. ^ a b Turing (1950), p. 1.\n
    682. \n
    683. ^ Turing (1950), Under "The Argument from Consciousness".\n
    684. \n
    685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
    686. \n
    687. ^ Russell & Norvig (2021), p. 3.\n
    688. \n
    689. ^ Maker (2006).\n
    690. \n
    691. ^ McCarthy (1999).\n
    692. \n
    693. ^ Minsky (1986).\n
    694. \n
    695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
    696. \n
    697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
    698. \n
    699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
    700. \n
    701. ^ Nilsson (1983), p. 10.\n
    702. \n
    703. ^ Haugeland (1985), pp. 112–117.\n
    704. \n
    705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
    706. \n
    707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
    708. \n
    709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
    710. \n
    711. ^ Crevier (1993), p. 125.\n
    712. \n
    713. ^ Langley (2011).\n
    714. \n
    715. ^ Katz (2012).\n
    716. \n
    717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
    718. \n
    719. ^ Pennachin & Goertzel (2007).\n
    720. \n
    721. ^ a b Roberts (2016).\n
    722. \n
    723. ^ Russell & Norvig (2021), p. 986.\n
    724. \n
    725. ^ Chalmers (1995).\n
    726. \n
    727. ^ Dennett (1991).\n
    728. \n
    729. ^ Horst (2005).\n
    730. \n
    731. ^ Searle (1999).\n
    732. \n
    733. ^ Searle (1980), p. 1.\n
    734. \n
    735. ^ Russell & Norvig (2021), p. 9817.\n
    736. \n
    737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
    738. \n
    739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
    740. \n
    741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
    742. \n
    743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
    744. \n
    745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
    746. \n
    747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
    748. \n
    749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
    750. \n
    751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
    752. \n
    753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
    754. \n
    755. ^ Russell & Norvig (2021), p. 1005.\n
    756. \n
    757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
    758. \n
    759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
    760. \n
    761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
    762. \n
    763. ^ McCorduck (2004), pp. 340–400.\n
    764. \n
    765. ^ Buttazzo (2001).\n
    766. \n
    767. ^ Anderson (2008).\n
    768. \n
    769. ^ McCauley (2007).\n
    770. \n
    771. ^ Galvan (1997).\n
    772. \n
    \n

    AI textbooks

    \n

    The two most widely used textbooks in 2023 (see the Open Syllabus):\n

    \n\n

    These were the four of the most widely used AI textbooks in 2008:\n

    \n
    \n\n
    \n

    Other textbooks:\n

    \n\n

    History of AI

    \n
    \n\n
    \n

    Other sources

    \n
    \n\n\n
    \n

    Further reading

    \n
    \n\n
    \n
    \n\n\n\n\n\n\n\n\n\n
    \n
    \n\t\t\t\t\t\n\t\t\t\t
    \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
    \n\t
      \n
      \n\n\n'}'. -[2024-11-03 23:14:35,423] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_RYb5ybQGfa6oTI7Vspa0826j', type='function')], function_call=None) -[2024-11-03 23:14:35,526] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:14:35,528] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 23:14:38,430] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23trending'}'. -[2024-11-03 23:14:38,432] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23trending', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-11-03 23:14:38,433] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_FTGL5vfDF77GlMhXb1LPvFu2', type='function')], function_call=None) -[2024-11-03 23:14:38,434] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_sJNrbNSzyUnzmrmQbP4Ttc0W', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_RYb5ybQGfa6oTI7Vspa0826j', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_FTGL5vfDF77GlMhXb1LPvFu2', type='function')], function_call=None) - -[2024-11-03 23:14:38,436] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_sJNrbNSzyUnzmrmQbP4Ttc0W', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_RYb5ybQGfa6oTI7Vspa0826j', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_FTGL5vfDF77GlMhXb1LPvFu2', type='function')], function_call=None) - -[2024-11-03 23:14:38,437] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_sJNrbNSzyUnzmrmQbP4Ttc0W', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_RYb5ybQGfa6oTI7Vspa0826j', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_FTGL5vfDF77GlMhXb1LPvFu2', type='function')], function_call=None) - -[2024-11-03 23:14:38,892] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-03 23:14:38,893] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-03 23:14:38,895] [INFO] [Engine]: Engine simulation loop completed. 
-[2024-11-03 23:14:38,896] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:14:38,897] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-11-03 23:14:38,898] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-11-03 23:14:38,900] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:14:45,313] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:14:45,315] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:14:45,315] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:14:45,316] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:14:45,317] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:14:45,319] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:14:45,320] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:14:45,321] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:14:45,322] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:14:45,325] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 23:14:45,325] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:14:45,326] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:14:45,327] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:14:45,328] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:14:45,331] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:14:45,331] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:14:45,332] [INFO] [Engine]: Engine starting simulation. 
-[2024-11-03 23:14:45,333] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:14:45,334] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-11-03 23:14:46,338] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:14:46,339] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:14:46,341] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:14:46,342] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:14:50,743] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:14:50,744] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:15:31,997] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: For task Search for the latest trends in AI using Google. -[2024-11-03 23:15:31,999] [INFO] [BaseAgent]: Agent agent2 received message from agent1: For task Search for the latest trends in AI using Google. -[2024-11-03 23:15:32,676] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. -[2024-11-03 23:15:32,677] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 093d7e36-c560-4687-aee1-e598a31ba98d', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google.'}'. 
-[2024-11-03 23:15:32,678] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_DH0OJ9iQhDV5Iyi3n7brCF7r', type='function')], function_call=None) -[2024-11-03 23:15:32,680] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:15:32,681] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:16:20,373] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:16:20,374] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:16:20,388] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 23:16:21,704] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:16:21,705] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_DH0OJ9iQhDV5Iyi3n7brCF7r', type='function')], function_call=None) - -[2024-11-03 23:16:21,706] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_DH0OJ9iQhDV5Iyi3n7brCF7r', type='function')], function_call=None) - -[2024-11-03 23:16:21,708] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_DH0OJ9iQhDV5Iyi3n7brCF7r', type='function')], function_call=None) - -[2024-11-03 23:16:21,709] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:16:21,713] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:16:21,714] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:16:21,716] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:16:21,717] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:16:26,301] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:16:26,302] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:16:26,303] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:16:26,304] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:16:26,305] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:16:26,306] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:16:26,307] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:16:26,308] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:16:26,309] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:16:26,312] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:16:26,312] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:16:26,313] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:16:26,314] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:16:26,315] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:16:26,318] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:16:26,318] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:16:26,319] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:16:26,320] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:16:26,321] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:16:27,370] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:16:27,371] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:16:27,372] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:16:27,373] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:17:34,900] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-03 23:17:34,901] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-03 23:17:50,501] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: For task Search for the latest trends in AI using Google. -[2024-11-03 23:17:50,529] [INFO] [BaseAgent]: Agent agent2 received message from agent1: For task Search for the latest trends in AI using Google. 
-[2024-11-03 23:17:51,034] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google and provide the information?'}'. -[2024-11-03 23:17:51,059] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session cf2759e3-dce4-41fb-b777-e9b72292abfa', 'session_id': 'Agent2 asked Agent1 to search for the latest trends in AI using Google and provide the information.'}'. -[2024-11-03 23:17:51,061] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_JkifRcFbT1swXpGc1MZZqPCG', type='function')], function_call=None) -[2024-11-03 23:17:51,063] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:17:51,064] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:17:53,449] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:17:53,450] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:17:53,451] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. 
-[2024-11-03 23:18:55,451] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:18:55,452] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_JkifRcFbT1swXpGc1MZZqPCG', type='function')], function_call=None) - -[2024-11-03 23:18:55,477] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_JkifRcFbT1swXpGc1MZZqPCG', type='function')], function_call=None) - -[2024-11-03 23:18:55,478] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_JkifRcFbT1swXpGc1MZZqPCG', type='function')], function_call=None) - -[2024-11-03 23:18:55,479] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:18:55,487] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:18:55,488] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:18:55,489] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:18:55,490] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:19:07,733] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:19:07,754] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:19:07,754] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:19:07,756] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:19:07,757] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:19:07,758] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:19:07,759] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:19:07,761] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:19:07,762] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:19:07,765] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:19:07,765] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:19:07,766] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:19:07,767] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:19:07,768] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:19:07,771] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:19:07,771] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:19:07,773] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:19:07,774] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:19:07,775] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:19:09,180] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search the latest trends in AI using Google.', 'agent3': 'Search the latest trends in AI using Wikipedia.', 'agent4': 'Search the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:19:09,181] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search the latest trends in AI using Google.', 'agent3': 'Search the latest trends in AI using Wikipedia.', 'agent4': 'Search the latest trends in AI using Twitter.'} -[2024-11-03 23:19:09,182] [INFO] [Engine]: Assigning task to agent2: Search the latest trends in AI using Google. -[2024-11-03 23:19:09,183] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search the latest trends in AI using Google.'. -[2024-11-03 23:19:11,622] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:19:11,623] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:20:11,995] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: For task Search the latest trends in AI using Google. -[2024-11-03 23:20:11,997] [INFO] [BaseAgent]: Agent agent2 received message from agent1: For task Search the latest trends in AI using Google. -[2024-11-03 23:20:59,821] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-03 23:20:59,822] [INFO] [Engine]: Assigning task to agent3: Search the latest trends in AI using Wikipedia. 
-[2024-11-03 23:20:59,840] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search the latest trends in AI using Wikipedia.'. -[2024-11-03 23:21:01,968] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:21:01,970] [INFO] [Engine]: Assigning task to agent4: Search the latest trends in AI using Twitter. -[2024-11-03 23:21:01,971] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search the latest trends in AI using Twitter.'. -[2024-11-03 23:21:04,370] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:21:04,372] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 23:21:04,373] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-03 23:21:04,375] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-03 23:21:04,376] [ERROR] [Engine]: An error occurred during simulation. -Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:21:04,396] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:21:04,397] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:21:04,398] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:21:04,399] [INFO] 
[Engine]: Simulation completed. -[2024-11-03 23:21:11,651] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:21:11,652] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:21:11,652] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:21:11,653] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:21:11,655] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:21:11,656] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:21:11,657] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:21:11,658] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:21:11,659] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:21:11,662] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 23:21:11,662] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:21:11,663] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:21:11,664] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:21:11,665] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:21:11,668] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:21:11,668] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:21:11,669] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:21:11,670] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:21:11,671] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:21:12,670] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:21:12,672] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:21:12,673] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. 
-[2024-11-03 23:21:12,674] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:23:27,748] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:23:27,749] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:23:32,472] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: For task Search for the latest trends in AI using Google. -[2024-11-03 23:23:32,474] [INFO] [BaseAgent]: Agent agent2 received message from agent1: For task Search for the latest trends in AI using Google. -[2024-11-03 23:25:20,257] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-03 23:25:20,259] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:25:20,276] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:25:21,578] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:25:21,580] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:25:21,582] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 23:25:23,122] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:25:23,124] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 23:25:23,126] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-03 23:25:23,127] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-03 23:25:23,128] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:25:23,136] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:25:23,137] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:25:23,138] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:25:23,140] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:27:58,547] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:27:58,594] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:27:58,594] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:27:58,596] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:27:58,597] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:27:58,598] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:27:58,599] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:27:58,600] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:27:58,601] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:27:58,604] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:27:58,604] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:27:58,611] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:27:58,612] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:27:58,613] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:27:58,617] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:27:58,617] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:27:58,618] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:27:58,619] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:27:58,620] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:27:59,770] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search the latest trends in AI using Google.', 'agent3': 'Search the latest trends in AI using Wikipedia.', 'agent4': 'Search the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:27:59,771] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search the latest trends in AI using Google.', 'agent3': 'Search the latest trends in AI using Wikipedia.', 'agent4': 'Search the latest trends in AI using Twitter.'} -[2024-11-03 23:27:59,772] [INFO] [Engine]: Assigning task to agent2: Search the latest trends in AI using Google. -[2024-11-03 23:27:59,773] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search the latest trends in AI using Google.'. -[2024-11-03 23:28:19,769] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google? -[2024-11-03 23:28:19,785] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google? -[2024-11-03 23:28:28,147] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: The task is: Search the latest trends in AI using Google. -[2024-11-03 23:28:28,149] [INFO] [BaseAgent]: Agent agent2 received message from agent1: The task is: Search the latest trends in AI using Google. -[2024-11-03 23:28:32,167] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I will search for the latest trends in AI using Google. -[2024-11-03 23:28:32,169] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I will search for the latest trends in AI using Google. 
-[2024-11-03 23:28:43,550] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Please search for the latest trends in AI using Google. -[2024-11-03 23:28:43,551] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Please search for the latest trends in AI using Google. -[2024-11-03 23:28:49,710] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Please search for the latest trends in AI using Google. -[2024-11-03 23:28:49,711] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Please search for the latest trends in AI using Google. -[2024-11-03 23:28:57,899] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: The task is: Search the latest trends in AI using Google. -[2024-11-03 23:28:57,973] [INFO] [BaseAgent]: Agent agent2 received message from agent1: The task is: Search the latest trends in AI using Google. -[2024-11-03 23:28:58,654] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google?'}'. -[2024-11-03 23:28:58,692] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 09bdadc2-3b04-4161-8be4-11ec35fa0f12', 'session_id': 'In the chat history, agent2 asked agent1 to search for the latest trends in AI using Google. Agent2 also reiterated the request for agent1 to search for the latest trends in AI using Google.'}'. -[2024-11-03 23:28:58,694] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_kCjOwzRWyg4qbPnayzKv9RMm', type='function')], function_call=None) -[2024-11-03 23:28:58,695] [INFO] [Engine]: Assigning task to agent3: Search the latest trends in AI using Wikipedia. 
-[2024-11-03 23:28:58,696] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search the latest trends in AI using Wikipedia.'. -[2024-11-03 23:35:55,877] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-03 23:35:55,886] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
      \n\t
      \n\t\t
      \n\t\t\t
      \n\n\t\t\n\t\t\t\n\n\n\t\t
      \n\t\t
      \n\t\t\t\n\n\n\t\t\t\n\n\t\t
      \n\t\n\n
      \n\t
      \n\t\t
      \n\t\t\t
      \n\t\t
      \n\t\t
      \n\t\t\t
      \n\t\t
      \n\t\t\t\n\t\t
      \n\t
      \n\t
      \n\t\t\t\t
      \n\t\t\n\t\t\t
      \n\t\t
      \n\t\t
      \n\t\t\t
      \n\t\t\t\t
      \n\t\t\t\t\t\n\t\t\t\t\t

      Artificial intelligence

      \n\t\t\t\t\t\t\t\n
      \n\t\n\t\n\t
      \n\n\t\t
      \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
      \n\n\t
      \n
      \n
      \n\t\t\t\t
      \n\t\t\t\t\t
      \n\t\t\t\t\t\t
      \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
      \n\t\t\t\t\t\t
      \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
      \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
      \n\t\t\t\t\t
      \n\t\t\t\t
      \n\t\t\t\t
      \n\t\t\t\t\t
      \n\t\t\t\t\t\t\n\t\t\t\t\t\t
      \n\t\t\n\t\t\t\t\t
      \n\t\t\t\t
      \n\t\t\t\t
      \n\t\t\t\t\t
      \n\t\t\t\t\t\t\t
      \n\t\t
      Page semi-protected
      \n\t\t
      \n\n\t\t\t\t\t\t
      From Wikipedia, the free encyclopedia
      \n\t\t\t\t\t
      \n\t\t\t\t\t
      \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
      \n\n

      \n

      \n\n\n\n\n\n\n\n

      Artificial intelligence (AI), in its broadest sense, is intelligence emulated by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

      Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

      The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

      Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

      \n\n

      Goals

      \n

      The general problem of fully simulating (or creating) intelligence is mostly found to be overwhelming. However, some types of problems have been successfully broken into more achievable subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

      \n

      Reasoning and problem-solving

      \n

      Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

      Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

      \n

      Knowledge representation

      \n
      An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
      \n

      Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

      A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

      Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

      \n

      Planning and decision-making

      \n

      An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

      In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

      In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

      A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

      Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

      \n

      Learning

      \n

      Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

      There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

      In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

      Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

      \n
      \n

      Natural language processing

      \n

      Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

      Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

      Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

      \n

      Perception

      \n

      Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

      The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

      \n

      Social intelligence

      \n
      Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
      \n

      Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

      However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

      \n

      General intelligence

      \n

      A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

      \n

      Techniques

      \n

      AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

      \n

      Search and optimization

      \n

      AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

      \n
      \n

      State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

      Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

      Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

      \n
      \n
      Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

      Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

      Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

      Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

      Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

      \n

      Logic

      \n

      Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

      Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

      Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

      Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

      Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

      Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

      \n

      Probabilistic methods for uncertain reasoning

      \n
      A simple Bayesian network, with the associated conditional probability tables
      \n

      Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

      Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

      Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

      \n
      Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
      \n

      Classifiers and statistical learning methods

      \n

      The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

      There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

      \n

      Artificial neural networks

      \n
      A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
      \n

      An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

      Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

      In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

      \n
      \n

      Deep learning

      \n
      \n

      Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

      Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

      \n

      GPT

      \n

      Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

      Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

      \n

      Hardware and software

      \n\n

      In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

      The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

      \n

      Applications

      \n

      AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

      Health and medicine

      \n\n

      The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

      For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

      \n

      Games

      \n\n

      Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

      \n

      Mathematics

      \n

      In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

      Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome, including proof of theorems, have been developed, such as AlphaTensor, AlphaGeometry and AlphaProof, all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

      When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

      Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

      \n

      Finance

      \n

      Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

      World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

      \n

      Military

      \n\n

      Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

      In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

      \n

      Generative AI

      \n\n
      Vincent van Gogh in watercolour created by generative AI software
      \n

      In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

      In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

      \n

      Agents

      \n

      Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

      \n

      Other industry-specific tasks

      \n

      There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

      AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

      In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

      Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

      During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

      \n

      Ethics

      \n\n

      AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

      \n

      Risks and harm

      \n
      \n\n

      Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

      AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

      Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

      AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

      Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

      \n

      Dominance by tech giants

      \n

      The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

      \n

      Substantial power needs and other environmental impacts

      \n\n

      In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

      Prodigious power consumption by AI is responsible for the growth of fossil fuel use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

      A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

      In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

      In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

      \n

      Misinformation

      \n\n

      YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

      In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

      \n

      Algorithmic bias and fairness

      \n\n

      Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

      On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

      COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

      A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[217]\n

      Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

      Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

      There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

      At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][220]\n

      \n

      Lack of transparency

      \n\n

      Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[222]\n

      It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

      People who have been harmed by an algorithm\'s decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

      DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

      Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

      \n

      Bad actors and weaponized AI

      \n\n

      Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

      A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is likely that they will be unable to reliably choose targets and could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

      AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

      There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

      \n

      Technological unemployment

      \n\n

      Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

      In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

      Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

      From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

      \n

      Existential risk

      \n\n

      It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

      First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[255]\n

      Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

      The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

      In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

      In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

      Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

      \n

      Ethical machines and alignment

      \n\n

      Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

      Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

      Other approaches include Wendell Wallach\'s "artificial moral agents"[273] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[274]\n

      \n

      Open source

      \n

      Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

      \n

      Frameworks

      \n

      Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[282][283]\n

      \n
      • Respect the dignity of individual people
      • \n
      • Connect with other people sincerely, openly, and inclusively
      • \n
      • Care for the wellbeing of everyone
      • \n
      • Protect social values, justice, and the public interest
      \n

      Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles are not without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[285]\n

      Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

      In 2024, the UK AI Safety Institute released a testing toolset called \'Inspect\' for AI safety evaluations, available under an MIT open-source licence; it is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

      \n

      Regulation

      \n\n
      AI Safety Summit
      The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
      \n

      The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

      In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

      In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

      \n

      History

      \n\n\n

      The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

      The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

      Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

      In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

      Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

      AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

      Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

      In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

      In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

      \n

      Philosophy

      \n\n

      Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

      \n

      Defining artificial intelligence

      \n\n

      Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

      \n
      The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
      \n

      Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

      McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

      Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

      Some authors have suggested that, in practice, the definition of AI is vague and difficult to pin down, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

      \n

      Evaluating approaches to AI

      \n

      No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

      \n

      Symbolic AI and its limits

      \n

      Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

      However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

      The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

      \n

      Neat vs. scruffy

      \n\n

      "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

      \n

      Soft vs. hard computing

      \n\n

      Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

      \n

      Narrow vs. general AI

      \n\n

      AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

      \n

      Machine consciousness, sentience, and mind

      \n\n

      The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

      \n

      Consciousness

      \n\n

      David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

      \n

      Computationalism and functionalism

      \n\n

      Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

      Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

      \n

      AI welfare and rights

      \n

      It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

      In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

      Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

      \n

      Future

      \n

      Superintelligence and the singularity

      \n

      A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

      However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

      \n

      Transhumanism

      \n\n

      Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

      Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

      \n

      In fiction

      \n\n
      The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
      \n

      Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

      A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

      Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

      Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

      \n

      See also

      \n\n

      Explanatory notes

      \n
      \n
        \n
      1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
      2. \n
      3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
      4. \n
      5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
      6. \n
      7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
      8. \n
      9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
      10. \n
      11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
      12. \n
      13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
      14. \n
      15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
      16. \n
      17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
      18. \n
      19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
      20. \n
      21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
      22. \n
      23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
      24. \n
      25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
      26. \n
      27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
      28. \n
      29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
      30. \n
      31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
      32. \n
      33. ^ Sometimes called a "robopocalypse"[252]\n
      34. \n
      35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
      36. \n
      37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
      38. \n
      39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
      40. \n
      41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
      42. \n
      43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
      44. \n
      45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
      46. \n
      47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
      48. \n
      49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
      50. \n
      51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
      52. \n
      53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
      54. \n
      55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
      56. \n
      57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
      58. \n
      \n

      References

      \n
      \n
        \n
      1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
      2. \n
      3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
      4. \n
      5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
      6. \n
      7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
        Proposal for the modern version: Pennachin & Goertzel (2007)
        Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
        \n
      8. \n
      9. ^ Russell & Norvig (2021, §1.2).\n
      10. \n
      11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
        The proposal: McCarthy et al. (1955)
        \n
      12. \n
      13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
      14. \n
      15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
      16. \n
      17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
      18. \n
      19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
      20. \n
      21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
      22. \n
      23. ^ Toews (2023).\n
      24. \n
      25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
      26. \n
      27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
      28. \n
      29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
      30. \n
      31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
      32. \n
      33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
      34. \n
      35. ^ Smoliar & Zhang (1994).\n
      36. \n
      37. ^ Neumann & Möller (2008).\n
      38. \n
      39. ^ Kuperman, Reichley & Bailey (2006).\n
      40. \n
      41. ^ McGarry (2005).\n
      42. \n
      43. ^ Bertini, Del Bimbo & Torniai (2006).\n
      44. \n
      45. ^ Russell & Norvig (2021), pp. 272.\n
      46. \n
      47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
      48. \n
      49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
      50. \n
      51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
      52. \n
      53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
      54. \n
      55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
      56. \n
      57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
      58. \n
      59. ^ Newquist (1994), p. 296.\n
      60. \n
      61. ^ Crevier (1993), pp. 204–208.\n
      62. \n
      63. ^ Russell & Norvig (2021), p. 528.\n
      64. \n
      65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
      66. \n
      67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
      68. \n
      69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
      70. \n
      71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
      72. \n
      73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
      74. \n
      75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
      76. \n
      77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
      78. \n
      79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
      80. \n
      81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
      82. \n
      83. ^ Turing (1950).\n
      84. \n
      85. ^ Solomonoff (1956).\n
      86. \n
      87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
      88. \n
      89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
      90. \n
      91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
      92. \n
      93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
      94. \n
      95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
      96. \n
      97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
      98. \n
      99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
      100. \n
      101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
      102. \n
      103. ^ Russell & Norvig (2021), pp. 856–858.\n
      104. \n
      105. ^ Dickson (2022).\n
      106. \n
      107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
      108. \n
      109. ^ Vincent (2019).\n
      110. \n
      111. ^ Russell & Norvig (2021), pp. 875–878.\n
      112. \n
      113. ^ Bushwick (2023).\n
      114. \n
      115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
      116. \n
      117. ^ Russell & Norvig (2021), pp. 849–850.\n
      118. \n
      119. ^ Russell & Norvig (2021), pp. 895–899.\n
      120. \n
      121. ^ Russell & Norvig (2021), pp. 899–901.\n
      122. \n
      123. ^ Challa et al. (2011).\n
      124. \n
      125. ^ Russell & Norvig (2021), pp. 931–938.\n
      126. \n
      127. ^ MIT AIL (2014).\n
      128. \n
      129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
      130. \n
      131. ^ Waddell (2018).\n
      132. \n
      133. ^ Poria et al. (2017).\n
      134. \n
      135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
      136. \n
      137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
      138. \n
      139. ^ Russell & Norvig (2021), sect. 11.2.\n
      140. \n
      141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
      142. \n
      143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
      144. \n
      145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
      146. \n
      147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
      148. \n
      149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
      150. \n
      151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
      152. \n
      153. ^ Merkle & Middendorf (2013).\n
      154. \n
      155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
      156. \n
      157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
      158. \n
      159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
      160. \n
      161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
      162. \n
      163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
      164. \n
      165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
      166. \n
      167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
      168. \n
      169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
      170. \n
      171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
      172. \n
      173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
      174. \n
      175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
      176. \n
      177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
      178. \n
      179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
      180. \n
      181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
      182. \n
      183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
      184. \n
      185. ^ Domingos (2015), chpt. 6.\n
      186. \n
      187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
      188. \n
      189. ^ Domingos (2015), p. 210.\n
      190. \n
      191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
      192. \n
      193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
      194. \n
      195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
      196. \n
      197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
      198. \n
      199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
      200. \n
      201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
      202. \n
      203. ^ Domingos (2015), p. 152.\n
      204. \n
      205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
      206. \n
      207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
      208. \n
      209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
      210. \n
      211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
      212. \n
      213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
      214. \n
      215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
      216. \n
      217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
      218. \n
      219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
      220. \n
      221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
      222. \n
      223. ^ Deng & Yu (2014), pp. 199–200.\n
      224. \n
      225. ^ Ciresan, Meier & Schmidhuber (2012).\n
      226. \n
      227. ^ Russell & Norvig (2021), p. 751.\n
      228. \n
      229. ^ a b c Russell & Norvig (2021), p. 17.\n
      230. \n
      231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
      232. \n
      233. ^ a b Schmidhuber (2022), sect. 5.\n
      234. \n
      235. ^ Schmidhuber (2022), sect. 6.\n
      236. \n
      237. ^ a b c Schmidhuber (2022), sect. 7.\n
      238. \n
      239. ^ Schmidhuber (2022), sect. 8.\n
      240. \n
      241. ^ Quoted in Christian (2020, p. 22)\n
      242. \n
      243. ^ Smith (2023).\n
      244. \n
      245. ^ "Explained: Generative AI". 9 November 2023.\n
      246. \n
      247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
      248. \n
      249. ^ Marmouyet (2023).\n
      250. \n
      251. ^ Kobielus (2019).\n
      252. \n
      253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
      254. \n
      255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
      256. \n
      257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
      258. \n
      259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
      260. \n
      261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
      262. \n
      263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
      264. \n
      265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
      266. \n
      267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
      268. \n
      269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
      270. \n
      271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      272. \n
      273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
      274. \n
      275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
      276. \n
      277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
      278. \n
      279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
      280. \n
      281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
      282. \n
      283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
      284. \n
      285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
      286. \n
      287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
      288. \n
      289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
      290. \n
      291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
      292. \n
      293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
      294. \n
      295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
      296. \n
      297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
      298. \n
      299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
      300. \n
      301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
      302. \n
      303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
      304. \n
      305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
      306. \n
      307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
      308. \n
      309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
      310. \n
      311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
      312. \n
      313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
      314. \n
      315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
      316. \n
      317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
      318. \n
      319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
      320. \n
      321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
      322. \n
      323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
      324. \n
      325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
      326. \n
      327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
      328. \n
      329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      330. \n
      331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
      332. \n
      333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
      334. \n
      335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
      336. \n
      337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
      338. \n
      339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      340. \n
      341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
      342. \n
      343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
      344. \n
      345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      346. \n
      347. ^ Simonite (2016).\n
      348. \n
      349. ^ Russell & Norvig (2021), p. 987.\n
      350. \n
      351. ^ Laskowski (2023).\n
      352. \n
      353. ^ GAO (2022).\n
      354. \n
      355. ^ Valinsky (2019).\n
      356. \n
      357. ^ Russell & Norvig (2021), p. 991.\n
      358. \n
      359. ^ Russell & Norvig (2021), pp. 991–992.\n
      360. \n
      361. ^ Christian (2020), p. 63.\n
      362. \n
      363. ^ Vincent (2022).\n
      364. \n
      365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
      366. \n
      367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
      368. \n
      369. ^ Reisner (2023).\n
      370. \n
      371. ^ Alter & Harris (2023).\n
      372. \n
      373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
      374. \n
      375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
      376. \n
      377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
      378. \n
      379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
      380. \n
      381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
      382. \n
      383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
      384. \n
      385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
      386. \n
      387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
      388. \n
      389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
      390. \n
      391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
      392. \n
      393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
      394. \n
      395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      396. \n
      397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
      398. \n
      399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      400. \n
      401. ^ Nicas (2018).\n
      402. \n
      403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
      404. \n
      405. ^ Williams (2023).\n
      406. \n
      407. ^ Taylor & Hern (2023).\n
      408. \n
      409. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
      410. \n
      411. ^ a b Rose (2023).\n
      412. \n
      413. ^ CNA (2019).\n
      414. \n
      415. ^ Goffrey (2008), p. 17.\n
      416. \n
      417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
      418. \n
      419. ^ Christian (2020), p. 25.\n
      420. \n
      421. ^ a b Russell & Norvig (2021), p. 995.\n
      422. \n
      423. ^ Grant & Hill (2023).\n
      424. \n
      425. ^ Larson & Angwin (2016).\n
      426. \n
      427. ^ Christian (2020), p. 67–70.\n
      428. \n
      429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
      430. \n
      431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
      432. \n
      433. ^ Quoted in Christian (2020, p. 65).\n
      434. \n
      435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
      436. \n
      437. ^ Quoted in Christian (2020, p. 80)\n
      438. \n
      439. ^ Dockrill (2022).\n
      440. \n
      441. ^ Sample (2017).\n
      442. \n
      443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
      444. \n
      445. ^ Christian (2020), p. 110.\n
      446. \n
      447. ^ Christian (2020), pp. 88–91.\n
      448. \n
      449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
      450. \n
      451. ^ Christian (2020), p. 91.\n
      452. \n
      453. ^ Christian (2020), p. 83.\n
      454. \n
      455. ^ Verma (2021).\n
      456. \n
      457. ^ Rothman (2020).\n
      458. \n
      459. ^ Christian (2020), pp. 105–108.\n
      460. \n
      461. ^ Christian (2020), pp. 108–112.\n
      462. \n
      463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
      464. \n
      465. ^ Russell & Norvig (2021), p. 989.\n
      466. \n
      467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
      468. \n
      469. ^ Russell & Norvig (2021), p. 988.\n
      470. \n
      471. ^ Robitzski (2018); Sainato (2015)\n
      472. \n
      473. ^ Harari (2018).\n
      474. \n
      475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
      476. \n
      477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
      478. \n
      479. ^ Urbina et al. (2022).\n
      480. \n
      481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
      482. \n
      483. ^ Ford & Colvin (2015); McGaughey (2022)\n
      484. \n
      485. ^ IGM Chicago (2017).\n
      486. \n
      487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
      488. \n
      489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
      490. \n
      491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
      492. \n
      493. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
      494. \n
      495. ^ Morgenstern (2015).\n
      496. \n
      497. ^ Mahdawi (2017); Thompson (2014)\n
      498. \n
      499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
      500. \n
      501. ^ Cellan-Jones (2014).\n
      502. \n
      503. ^ Russell & Norvig 2021, p. 1001.\n
      504. \n
      505. ^ Bostrom (2014).\n
      506. \n
      507. ^ Russell (2019).\n
      508. \n
      509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
      510. \n
      511. ^ Harari (2023).\n
      512. \n
      513. ^ Müller & Bostrom (2014).\n
      514. \n
      515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
      516. \n
      517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
      518. \n
      519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
      520. \n
      521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
      522. \n
      523. ^ Valance (2023).\n
      524. \n
      525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
      526. \n
      527. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
      528. \n
      529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
      530. \n
      531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
      532. \n
      533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
      534. \n
      535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
      536. \n
      537. ^ a b Christian (2020), pp. 67, 73.\n
      538. \n
      539. ^ Yudkowsky (2008).\n
      540. \n
      541. ^ a b Anderson & Anderson (2011).\n
      542. \n
      543. ^ AAAI (2014).\n
      544. \n
      545. ^ Wallach (2010).\n
      546. \n
      547. ^ Russell (2019), p. 173.\n
      548. \n
      549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
      550. \n
      551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
      552. \n
      553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
      554. \n
      555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
      556. \n
      557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
      558. \n
      559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
      560. \n
      561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
      562. \n
      563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
      564. \n
      565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
      566. \n
      567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
      568. \n
      569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      570. \n
      571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      572. \n
      573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
      574. \n
      575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
      576. \n
      577. ^ a b Vincent (2023).\n
      578. \n
      579. ^ Stanford University (2023).\n
      580. \n
      581. ^ a b c d UNESCO (2021).\n
      582. \n
      583. ^ Kissinger (2021).\n
      584. \n
      585. ^ Altman, Brockman & Sutskever (2023).\n
      586. \n
      587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
      588. \n
      589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
      590. \n
      591. ^ Edwards (2023).\n
      592. \n
      593. ^ Kasperowicz (2023).\n
      594. \n
      595. ^ Fox News (2023).\n
      596. \n
      597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
      598. \n
      599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
      600. \n
      601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
      602. \n
      603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
      604. \n
      605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
      606. \n
      607. ^ a b Russell & Norvig 2021, p. 9.\n
      608. \n
      609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
      610. \n
      611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
      612. \n
      613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
      614. \n
      615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
      616. \n
      617. ^ Crevier (1993), pp. 47–49.\n
      618. \n
      619. ^ Russell & Norvig (2003), p. 17.\n
      620. \n
      621. ^ Russell & Norvig (2003), p. 18.\n
      622. \n
      623. ^ Newquist (1994), p. 86.\n
      624. \n
      625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
      626. \n
      627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
      628. \n
      629. ^ Russell & Norvig (2021), p. 21.\n
      630. \n
      631. ^ Lighthill (1973).\n
      632. \n
      633. ^ NRC 1999, pp. 212–213.\n
      634. \n
      635. ^ Russell & Norvig (2021), p. 22.\n
      636. \n
      637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
      638. \n
      639. ^ Russell & Norvig (2021), p. 24.\n
      640. \n
      641. ^ Nilsson (1998), p. 7.\n
      642. \n
      643. ^ McCorduck (2004), pp. 454–462.\n
      644. \n
      645. ^ Moravec (1988).\n
      646. \n
      647. ^ a b Brooks (1990).\n
      648. \n
      649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
      650. \n
      651. ^ Russell & Norvig (2021), p. 25.\n
      652. \n
      653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
      654. \n
      655. ^ Russell & Norvig (2021), p. 26.\n
      656. \n
      657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
      658. \n
      659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
      660. \n
      661. ^ Wong (2023).\n
      662. \n
      663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
      664. \n
      665. ^ a b c Clark (2015b).\n
      666. \n
      667. ^ Big data: Russell & Norvig (2021, p. 26)\n
      668. \n
      669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
      670. \n
      671. ^ DiFeliciantonio (2023).\n
      672. \n
      673. ^ Goswami (2023).\n
      674. \n
      675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
      676. \n
      677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
      678. \n
      679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
      680. \n
      681. ^ a b Turing (1950), p. 1.\n
      682. \n
      683. ^ Turing (1950), Under "The Argument from Consciousness".\n
      684. \n
      685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
      686. \n
      687. ^ Russell & Norvig (2021), p. 3.\n
      688. \n
      689. ^ Maker (2006).\n
      690. \n
      691. ^ McCarthy (1999).\n
      692. \n
      693. ^ Minsky (1986).\n
      694. \n
      695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
      696. \n
      697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
      698. \n
      699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
      700. \n
      701. ^ Nilsson (1983), p. 10.\n
      702. \n
      703. ^ Haugeland (1985), pp. 112–117.\n
      704. \n
      705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
      706. \n
      707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
      708. \n
      709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
      710. \n
      711. ^ Crevier (1993), p. 125.\n
      712. \n
      713. ^ Langley (2011).\n
      714. \n
      715. ^ Katz (2012).\n
      716. \n
      717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
      718. \n
      719. ^ Pennachin & Goertzel (2007).\n
      720. \n
      721. ^ a b Roberts (2016).\n
      722. \n
      723. ^ Russell & Norvig (2021), p. 986.\n
      724. \n
      725. ^ Chalmers (1995).\n
      726. \n
      727. ^ Dennett (1991).\n
      728. \n
      729. ^ Horst (2005).\n
      730. \n
      731. ^ Searle (1999).\n
      732. \n
      733. ^ Searle (1980), p. 1.\n
      734. \n
      735. ^ Russell & Norvig (2021), p. 9817.\n
      736. \n
      737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
      738. \n
      739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
      740. \n
      741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
      742. \n
      743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
      744. \n
      745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
      746. \n
      747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
      748. \n
      749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
      750. \n
      751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
      752. \n
      753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
      754. \n
      755. ^ Russell & Norvig (2021), p. 1005.\n
      756. \n
      757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
      758. \n
      759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
      760. \n
      761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
      762. \n
      763. ^ McCorduck (2004), pp. 340–400.\n
      764. \n
      765. ^ Buttazzo (2001).\n
      766. \n
      767. ^ Anderson (2008).\n
      768. \n
      769. ^ McCauley (2007).\n
      770. \n
      771. ^ Galvan (1997).\n
      772. \n
      \n

      AI textbooks

      \n

      The two most widely used textbooks in 2023 (see the Open Syllabus):\n

      \n\n

      These were the four of the most widely used AI textbooks in 2008:\n

      \n
      \n\n
      \n

      Other textbooks:\n

      \n\n

      History of AI

      \n
      \n\n
      \n

      Other sources

      \n
      \n\n\n
      \n

      Further reading

      \n
      \n\n
      \n
      \n\n\n\n\n\n\n\n\n\n
      \n
      \n\t\t\t\t\t\n\t\t\t\t
      \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
      \n\t
        \n
        \n\n\n'}'. -[2024-11-03 23:35:55,929] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_PMsJAjmMffRSBxFEyEABp0Hn', type='function')], function_call=None) -[2024-11-03 23:35:56,127] [INFO] [Engine]: Assigning task to agent4: Search the latest trends in AI using Twitter. -[2024-11-03 23:35:56,129] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search the latest trends in AI using Twitter.'. -[2024-11-03 23:35:59,079] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query'}'. -[2024-11-03 23:35:59,081] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-11-03 23:35:59,082] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_WgcyfPpjItvfe548yd2xveRd', type='function')], function_call=None) -[2024-11-03 23:35:59,083] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_kCjOwzRWyg4qbPnayzKv9RMm', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_PMsJAjmMffRSBxFEyEABp0Hn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_WgcyfPpjItvfe548yd2xveRd', type='function')], function_call=None) - -[2024-11-03 23:35:59,084] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_kCjOwzRWyg4qbPnayzKv9RMm', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), 
id='call_PMsJAjmMffRSBxFEyEABp0Hn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_WgcyfPpjItvfe548yd2xveRd', type='function')], function_call=None) - -[2024-11-03 23:35:59,085] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google?"}', name='new_communication_session'), id='call_kCjOwzRWyg4qbPnayzKv9RMm', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_PMsJAjmMffRSBxFEyEABp0Hn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_WgcyfPpjItvfe548yd2xveRd', type='function')], function_call=None) - -[2024-11-03 23:35:59,543] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-03 23:35:59,545] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-03 23:35:59,546] [INFO] [Engine]: Engine simulation loop completed. -[2024-11-03 23:35:59,548] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:35:59,549] [INFO] [Evaluator]: Total Token Consumption: 33 -[2024-11-03 23:35:59,551] [INFO] [Evaluator]: Average Tokens per Iteration: 33.0 -[2024-11-03 23:35:59,553] [INFO] [Engine]: Simulation completed. 
-[2024-11-03 23:36:46,517] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:36:46,526] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:36:46,526] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:36:46,527] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:36:46,528] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:36:46,529] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:36:46,531] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:36:46,532] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:36:46,533] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:36:46,536] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 23:36:46,536] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:36:46,538] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:36:46,539] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:36:46,540] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:36:46,543] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:36:46,543] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:36:46,545] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:36:46,546] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:36:46,547] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:36:47,576] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:36:47,577] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:36:47,578] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. 
-[2024-11-03 23:36:47,579] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:36:50,331] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-11-03 23:36:50,339] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

        Accessibility Links

        About 541,000,000 results (0.29 seconds) 

        Search Results

        AI Overview

        AI Overview
        Here are some of the latest trends in artificial intelligence (AI):
        • Increased regulation
          As AI becomes more widespread, there will be more regulation to ensure AI is used ethically and responsibly. 
        • Agentic AI
          Autonomous systems are being used to make decisions for users, enhancing output and decision-making. 
        • Predictive analytics
          AI and machine learning techniques are being used to improve accuracy and efficiency in business decision-making. 
        • Generative AI
          Machine learning and other tools are used to understand contents or objects and then generate new artifacts. 
        • Top AI Trends 2024: Key Developments to Watch
          Oct 7, 2024 — Agentic AI. One of the most notable new developments in the AI field is the use of autonomous systems to make decisions...
          Appinventiv
        • Unveiling the Top Artificial Intelligence Trends for 2024
          Apr 5, 2024 — Workplace AI. The final list in the new AI trend in 2024 and beyond will be the workplace AI wherein the technology is ...
          Prismetric
        • 6 Trends Driving the AI Everywhere Boom
          Feb 24, 2023 — Increased Regulation: As AI becomes more widespread, we can expect to see increased regulation of this technology. Thi...
          Intel Community
        • Show all
        Show more

        Featured snippet from the web

        Multimodal AI Multimodel models in AI can grasp information from different data types, like audio, video, and images, in addition to text. This technology is enabling search and content creation tools to become more seamless and intuitive and integrate more easily into other applications we already use.Oct 27, 2024
        Top Artificial Intelligence (AI) Trends for 2024
        Top Artificial Intelligence Trends | AI Trends
        People also ask
        Feedback
        People also search for
        Feedback

        Page Navigation

        Google apps
        '}'. -[2024-11-03 23:36:50,364] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_wWXjnvnmgMMpCZyq4D6B1S8S', type='function')], function_call=None) -[2024-11-03 23:36:50,412] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:36:50,414] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:36:52,212] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-03 23:36:52,222] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
        \n\t
        \n\t\t
        \n\t\t\t
        \n\n\t\t\n\t\t\t\n\n\n\t\t
        \n\t\t
        \n\t\t\t\n\n\n\t\t\t\n\n\t\t
        \n\t\n\n
        \n\t
        \n\t\t
        \n\t\t\t
        \n\t\t
        \n\t\t
        \n\t\t\t
        \n\t\t
        \n\t\t\t\n\t\t
        \n\t
        \n\t
        \n\t\t\t\t
        \n\t\t\n\t\t\t
        \n\t\t
        \n\t\t
        \n\t\t\t
        \n\t\t\t\t
        \n\t\t\t\t\t\n\t\t\t\t\t

        Artificial intelligence

        \n\t\t\t\t\t\t\t\n
        \n\t\n\t\n\t
        \n\n\t\t
        \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
        \n\n\t
        \n
        \n
        \n\t\t\t\t
        \n\t\t\t\t\t
        \n\t\t\t\t\t\t
        \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
        \n\t\t\t\t\t\t
        \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
        \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
        \n\t\t\t\t\t
        \n\t\t\t\t
        \n\t\t\t\t
        \n\t\t\t\t\t
        \n\t\t\t\t\t\t\n\t\t\t\t\t\t
        \n\t\t\n\t\t\t\t\t
        \n\t\t\t\t
        \n\t\t\t\t
        \n\t\t\t\t\t
        \n\t\t\t\t\t\t\t
        \n\t\t
        Page semi-protected
        \n\t\t
        \n\n\t\t\t\t\t\t
        From Wikipedia, the free encyclopedia
        \n\t\t\t\t\t
        \n\t\t\t\t\t
        \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
        \n\n

        \n

        \n\n\n\n\n\n\n\n

        Artificial intelligence (AI), in its broadest sense, is intelligence emulated by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

        Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

        The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

        Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

        \n\n

        Goals

        \n

        The general problem of fully simulating (or creating) intelligence is mostly found to be overwhelming. However, some types of problems have been successfully broken into more achievable subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

        \n

        Reasoning and problem-solving

        \n

        Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

        Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

        \n

        Knowledge representation

        \n
        An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
        \n

        Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

        A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

        Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

        \n

        Planning and decision-making

        \n

        An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

        In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

        In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

        A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

        Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

        \n

        Learning

        \n

        Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

        There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

        In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

        Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

        \n
        \n

        Natural language processing

        \n

        Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

        Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

        Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

        \n

        Perception

        \n

        Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

        The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

        \n

        Social intelligence

        \n
        Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
        \n

        Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

        However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

        \n

        General intelligence

        \n

        A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

        \n

        Techniques

        \n

        AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

        \n

        Search and optimization

        \n

        AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

        \n
        \n

        State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

        Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

        Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

        \n
        \n
        Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

        Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

        Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

        Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

        Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

        \n

        Logic

        \n

        Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

        Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

        Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

        Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

        Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

        Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

        \n

        Probabilistic methods for uncertain reasoning

        \n
        A simple Bayesian network, with the associated conditional probability tables
        \n

        Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

        Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

        Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

        \n
        Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
        \n

        Classifiers and statistical learning methods

        \n

        The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

        There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

        \n

        Artificial neural networks

        \n
        A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
        \n

        An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

        Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

        In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short-term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

        \n
        \n

        Deep learning

        \n
        \n

        Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

        Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

        \n

        GPT

        \n

        Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

        Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

        \n

        Hardware and software

        \n\n

        In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

        The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

        \n

        Applications

        \n

        AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

        Health and medicine

        \n\n

        The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

        For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

        \n

        Games

        \n\n

        Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

        \n

        Mathematics

        \n

        In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

        Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

        When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

        Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

        \n

        Finance

        \n

        Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

        World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

        \n

        Military

        \n\n

        Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

        In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

        \n

        Generative AI

        \n\n
        Vincent van Gogh in watercolour created by generative AI software
        \n

        In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

        In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

        \n

        Agents

        \n

        Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

        \n

        Other industry-specific tasks

        \n

        There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

        AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

        In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

        Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

        During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

        \n

        Ethics

        \n\n

        AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

        \n

        Risks and harm

        \n
        \n\n

        Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

        AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

        Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

        AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

        Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

        \n

        Dominance by tech giants

        \n

        The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

        \n

        Substantial power needs and other environmental impacts

        \n\n

        In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

        Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

        A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

        In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

        In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

        \n

        Misinformation

        \n\n

        YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

        In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

        \n

        Algorithmic bias and fairness

        \n\n

        Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

        On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

        COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would not re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

        A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[217]\n

        Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

        Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

        There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

        At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][220]\n

        \n

        Lack of transparency

        \n\n

        Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[222]\n

        It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

        People who have been harmed by an algorithm\'s decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

        DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

        Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

        \n

        Bad actors and weaponized AI

        \n\n

        Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

        A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

        AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

        There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

        \n

        Technological unemployment

        \n\n

        Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

        In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

        Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

        From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

        \n

        Existential risk

        \n\n

        It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

        First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[255]\n

        Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

        The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

        In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

        In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

        Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

        \n

        Ethical machines and alignment

        \n\n

        Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

        Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

        Other approaches include Wendell Wallach\'s "artificial moral agents"[273] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[274]\n

        \n

        Open source

        \n

        Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

        \n

        Frameworks

        \n

        Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[282][283]\n

        \n
        • Respect the dignity of individual people
        • \n
        • Connect with other people sincerely, openly, and inclusively
        • \n
        • Care for the wellbeing of everyone
        • \n
        • Protect social values, justice, and the public interest
        \n

        Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[285]\n

        Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

        The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under an MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

        \n

        Regulation

        \n\n
        AI Safety Summit
        The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
        \n

        The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

        In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

        In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

        \n

        History

        \n\n\n

        The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

        The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

        Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

        In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

        Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

        AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

        Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

        In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

        In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

        \n

        Philosophy

        \n\n

        Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

        \n

        Defining artificial intelligence

        \n\n

        Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

        \n
        The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
        \n

        Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

        McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

        Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

        Some authors have suggested in practice, that the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

        \n

        Evaluating approaches to AI

        \n

        No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

        \n

        Symbolic AI and its limits

        \n

        Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

        However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

        The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

        \n

        Neat vs. scruffy

        \n\n

        "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

        \n

        Soft vs. hard computing

        \n\n

        Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

        \n

        Narrow vs. general AI

        \n\n

        AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

        \n

        Machine consciousness, sentience, and mind

        \n\n

        The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

        \n

        Consciousness

        \n\n

        David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

        \n

        Computationalism and functionalism

        \n\n

        Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

        Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

        \n

        AI welfare and rights

        \n

        It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

        In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

        Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

        \n

        Future

        \n

        Superintelligence and the singularity

        \n

        A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

        However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

        \n

        Transhumanism

        \n\n

        Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

        Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

        \n

        In fiction

        \n\n
        The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
        \n

        Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

        A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

        Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

        Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

        \n

        See also

        \n\n

        Explanatory notes

        \n
        \n
          \n
        1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
        2. \n
        3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
        4. \n
        5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
        6. \n
        7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
        8. \n
        9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
        10. \n
        11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
        12. \n
        13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
        14. \n
        15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
        16. \n
        17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
        18. \n
        19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
        20. \n
        21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
        22. \n
        23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
        24. \n
        25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
        26. \n
        27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
        28. \n
        29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
        30. \n
        31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
        32. \n
        33. ^ Sometimes called a "robopocalypse"[252]\n
        34. \n
        35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
        36. \n
        37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
        38. \n
        39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
        40. \n
        41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
        42. \n
        43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
        44. \n
        45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
        46. \n
        47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
        48. \n
        49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
        50. \n
        51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
        52. \n
        53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
        54. \n
        55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
        56. \n
        57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
        58. \n
        \n

        References

        \n
        \n
          \n
        1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
        2. \n
        3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
        4. \n
        5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
        6. \n
        7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
          Proposal for the modern version: Pennachin & Goertzel (2007)
          Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
          \n
        8. \n
        9. ^ Russell & Norvig (2021, §1.2).\n
        10. \n
        11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
          The proposal: McCarthy et al. (1955)
          \n
        12. \n
        13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
        14. \n
        15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
        16. \n
        17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
        18. \n
        19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
        20. \n
        21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
        22. \n
        23. ^ Toews (2023).\n
        24. \n
        25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
        26. \n
        27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
        28. \n
        29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
        30. \n
        31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
        32. \n
        33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
        34. \n
        35. ^ Smoliar & Zhang (1994).\n
        36. \n
        37. ^ Neumann & Möller (2008).\n
        38. \n
        39. ^ Kuperman, Reichley & Bailey (2006).\n
        40. \n
        41. ^ McGarry (2005).\n
        42. \n
        43. ^ Bertini, Del Bimbo & Torniai (2006).\n
        44. \n
        45. ^ Russell & Norvig (2021), pp. 272.\n
        46. \n
        47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
        48. \n
        49. ^ Representing events and time:Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
        50. \n
        51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
        52. \n
        53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
        54. \n
        55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
        56. \n
        57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
        58. \n
        59. ^ Newquist (1994), p. 296.\n
        60. \n
        61. ^ Crevier (1993), pp. 204–208.\n
        62. \n
        63. ^ Russell & Norvig (2021), p. 528.\n
        64. \n
        65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
        66. \n
        67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
        68. \n
        69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
        70. \n
        71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
        72. \n
        73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
        74. \n
        75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
        76. \n
        77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
        78. \n
        79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
        80. \n
        81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
        82. \n
        83. ^ Turing (1950).\n
        84. \n
        85. ^ Solomonoff (1956).\n
        86. \n
        87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
        88. \n
        89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
        90. \n
        91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
        92. \n
        93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
        94. \n
        95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
        96. \n
        97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
        98. \n
        99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
        100. \n
        101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
        102. \n
        103. ^ Russell & Norvig (2021), pp. 856–858.\n
        104. \n
        105. ^ Dickson (2022).\n
        106. \n
        107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
        108. \n
        109. ^ Vincent (2019).\n
        110. \n
        111. ^ Russell & Norvig (2021), pp. 875–878.\n
        112. \n
        113. ^ Bushwick (2023).\n
        114. \n
        115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
        116. \n
        117. ^ Russell & Norvig (2021), pp. 849–850.\n
        118. \n
        119. ^ Russell & Norvig (2021), pp. 895–899.\n
        120. \n
        121. ^ Russell & Norvig (2021), pp. 899–901.\n
        122. \n
        123. ^ Challa et al. (2011).\n
        124. \n
        125. ^ Russell & Norvig (2021), pp. 931–938.\n
        126. \n
        127. ^ MIT AIL (2014).\n
        128. \n
        129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
        130. \n
        131. ^ Waddell (2018).\n
        132. \n
        133. ^ Poria et al. (2017).\n
        134. \n
        135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
        136. \n
        137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
        138. \n
        139. ^ Russell & Norvig (2021), sect. 11.2.\n
        140. \n
        141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
        142. \n
        143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
        144. \n
        145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
        146. \n
        147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
        148. \n
        149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
        150. \n
        151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
        152. \n
        153. ^ Merkle & Middendorf (2013).\n
        154. \n
        155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
        156. \n
        157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
        158. \n
        159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
        160. \n
        161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
        162. \n
        163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
        164. \n
        165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
        166. \n
        167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
        168. \n
        169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
        170. \n
        171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
        172. \n
        173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
        174. \n
        175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
        176. \n
        177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
        178. \n
        179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
        180. \n
        181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
        182. \n
        183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
        184. \n
        185. ^ Domingos (2015), chpt. 6.\n
        186. \n
        187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
        188. \n
        189. ^ Domingos (2015), p. 210.\n
        190. \n
        191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
        192. \n
        193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
        194. \n
        195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
        196. \n
        197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
        198. \n
        199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
        200. \n
        201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
        202. \n
        203. ^ Domingos (2015), p. 152.\n
        204. \n
        205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
        206. \n
        207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
        208. \n
        209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
        210. \n
        211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
        212. \n
        213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
        214. \n
        215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
        216. \n
        217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
        218. \n
        219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
        220. \n
        221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
        222. \n
        223. ^ Deng & Yu (2014), pp. 199–200.\n
        224. \n
        225. ^ Ciresan, Meier & Schmidhuber (2012).\n
        226. \n
        227. ^ Russell & Norvig (2021), p. 751.\n
        228. \n
        229. ^ a b c Russell & Norvig (2021), p. 17.\n
        230. \n
        231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
        232. \n
        233. ^ a b Schmidhuber (2022), sect. 5.\n
        234. \n
        235. ^ Schmidhuber (2022), sect. 6.\n
        236. \n
        237. ^ a b c Schmidhuber (2022), sect. 7.\n
        238. \n
        239. ^ Schmidhuber (2022), sect. 8.\n
        240. \n
        241. ^ Quoted in Christian (2020, p. 22)\n
        242. \n
        243. ^ Smith (2023).\n
        244. \n
        245. ^ "Explained: Generative AI". 9 November 2023.\n
        246. \n
        247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
        248. \n
        249. ^ Marmouyet (2023).\n
        250. \n
        251. ^ Kobielus (2019).\n
        252. \n
        253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
        254. \n
        255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
        256. \n
        257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
        258. \n
        259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
        260. \n
        261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
        262. \n
        263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
        264. \n
        265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
        266. \n
        267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
        268. \n
        269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
        270. \n
        271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        272. \n
        273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
        274. \n
        275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
        276. \n
        277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
        278. \n
        279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
        280. \n
        281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
        282. \n
        283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
        284. \n
        285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
        286. \n
        287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
        288. \n
        289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
        290. \n
        291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
        292. \n
        293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
        294. \n
        295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
        296. \n
        297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
        298. \n
        299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
        300. \n
        301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
        302. \n
        303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
        304. \n
        305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
        306. \n
        307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
        308. \n
        309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
        310. \n
        311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
        312. \n
        313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
        314. \n
        315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
        316. \n
        317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
        318. \n
        319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
        320. \n
        321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
        322. \n
        323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
        324. \n
        325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
        326. \n
        327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
        328. \n
        329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        330. \n
        331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
        332. \n
        333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
        334. \n
        335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
        336. \n
        337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
        338. \n
        339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        340. \n
        341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
        342. \n
        343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
        344. \n
        345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        346. \n
        347. ^ Simonite (2016).\n
        348. \n
        349. ^ Russell & Norvig (2021), p. 987.\n
        350. \n
        351. ^ Laskowski (2023).\n
        352. \n
        353. ^ GAO (2022).\n
        354. \n
        355. ^ Valinsky (2019).\n
        356. \n
        357. ^ Russell & Norvig (2021), p. 991.\n
        358. \n
        359. ^ Russell & Norvig (2021), pp. 991–992.\n
        360. \n
        361. ^ Christian (2020), p. 63.\n
        362. \n
        363. ^ Vincent (2022).\n
        364. \n
        365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
        366. \n
        367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
        368. \n
        369. ^ Reisner (2023).\n
        370. \n
        371. ^ Alter & Harris (2023).\n
        372. \n
        373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
        374. \n
        375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
        376. \n
        377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
        378. \n
        379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
        380. \n
        381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
        382. \n
        383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
        384. \n
        385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
        386. \n
        387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
        388. \n
        389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
        390. \n
        391. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
        392. \n
        393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
        394. \n
        395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        396. \n
        397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
        398. \n
        399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        400. \n
        401. ^ Nicas (2018).\n
        402. \n
        403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
        404. \n
        405. ^ Williams (2023).\n
        406. \n
        407. ^ Taylor & Hern (2023).\n
        408. \n
        409. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
        410. \n
        411. ^ a b Rose (2023).\n
        412. \n
        413. ^ CNA (2019).\n
        414. \n
        415. ^ Goffrey (2008), p. 17.\n
        416. \n
        417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
        418. \n
        419. ^ Christian (2020), p. 25.\n
        420. \n
        421. ^ a b Russell & Norvig (2021), p. 995.\n
        422. \n
        423. ^ Grant & Hill (2023).\n
        424. \n
        425. ^ Larson & Angwin (2016).\n
        426. \n
        427. ^ Christian (2020), pp. 67–70.\n
        428. \n
        429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
        430. \n
        431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
        432. \n
        433. ^ Quoted in Christian (2020, p. 65).\n
        434. \n
        435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
        436. \n
        437. ^ Quoted in Christian (2020, p. 80)\n
        438. \n
        439. ^ Dockrill (2022).\n
        440. \n
        441. ^ Sample (2017).\n
        442. \n
        443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
        444. \n
        445. ^ Christian (2020), p. 110.\n
        446. \n
        447. ^ Christian (2020), pp. 88–91.\n
        448. \n
        449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
        450. \n
        451. ^ Christian (2020), p. 91.\n
        452. \n
        453. ^ Christian (2020), p. 83.\n
        454. \n
        455. ^ Verma (2021).\n
        456. \n
        457. ^ Rothman (2020).\n
        458. \n
        459. ^ Christian (2020), pp. 105–108.\n
        460. \n
        461. ^ Christian (2020), pp. 108–112.\n
        462. \n
        463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
        464. \n
        465. ^ Russell & Norvig (2021), p. 989.\n
        466. \n
        467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
        468. \n
        469. ^ Russell & Norvig (2021), p. 988.\n
        470. \n
        471. ^ Robitzski (2018); Sainato (2015)\n
        472. \n
        473. ^ Harari (2018).\n
        474. \n
        475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
        476. \n
        477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
        478. \n
        479. ^ Urbina et al. (2022).\n
        480. \n
        481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
        482. \n
        483. ^ Ford & Colvin (2015); McGaughey (2022)\n
        484. \n
        485. ^ IGM Chicago (2017).\n
        486. \n
        487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
        488. \n
        489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
        490. \n
        491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
        492. \n
        493. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
        494. \n
        495. ^ Morgenstern (2015).\n
        496. \n
        497. ^ Mahdawi (2017); Thompson (2014)\n
        498. \n
        499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
        500. \n
        501. ^ Cellan-Jones (2014).\n
        502. \n
        503. ^ Russell & Norvig (2021), p. 1001.\n
        504. \n
        505. ^ Bostrom (2014).\n
        506. \n
        507. ^ Russell (2019).\n
        508. \n
        509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
        510. \n
        511. ^ Harari (2023).\n
        512. \n
        513. ^ Müller & Bostrom (2014).\n
        514. \n
        515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
        516. \n
        517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
        518. \n
        519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
        520. \n
        521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
        522. \n
        523. ^ Valance (2023).\n
        524. \n
        525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
        526. \n
        527. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
        528. \n
        529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
        530. \n
        531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
        532. \n
        533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
        534. \n
        535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
        536. \n
        537. ^ a b Christian (2020), pp. 67, 73.\n
        538. \n
        539. ^ Yudkowsky (2008).\n
        540. \n
        541. ^ a b Anderson & Anderson (2011).\n
        542. \n
        543. ^ AAAI (2014).\n
        544. \n
        545. ^ Wallach (2010).\n
        546. \n
        547. ^ Russell (2019), p. 173.\n
        548. \n
        549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
        550. \n
        551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
        552. \n
        553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
        554. \n
        555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
        556. \n
        557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
        558. \n
        559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
        560. \n
        561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
        562. \n
        563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
        564. \n
        565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
        566. \n
        567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
        568. \n
        569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        570. \n
        571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        572. \n
        573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
        574. \n
        575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
        576. \n
        577. ^ a b Vincent (2023).\n
        578. \n
        579. ^ Stanford University (2023).\n
        580. \n
        581. ^ a b c d UNESCO (2021).\n
        582. \n
        583. ^ Kissinger (2021).\n
        584. \n
        585. ^ Altman, Brockman & Sutskever (2023).\n
        586. \n
        587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
        588. \n
        589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
        590. \n
        591. ^ Edwards (2023).\n
        592. \n
        593. ^ Kasperowicz (2023).\n
        594. \n
        595. ^ Fox News (2023).\n
        596. \n
        597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
        598. \n
        599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
        600. \n
        601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
        602. \n
        603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
        604. \n
        605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
        606. \n
        607. ^ a b Russell & Norvig (2021), p. 9.\n
        608. \n
        609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
        610. \n
        611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
        612. \n
        613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
        614. \n
        615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
        616. \n
        617. ^ Crevier (1993), pp. 47–49.\n
        618. \n
        619. ^ Russell & Norvig (2003), p. 17.\n
        620. \n
        621. ^ Russell & Norvig (2003), p. 18.\n
        622. \n
        623. ^ Newquist (1994), p. 86.\n
        624. \n
        625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
        626. \n
        627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
        628. \n
        629. ^ Russell & Norvig (2021), p. 21.\n
        630. \n
        631. ^ Lighthill (1973).\n
        632. \n
        633. ^ NRC (1999), pp. 212–213.\n
        634. \n
        635. ^ Russell & Norvig (2021), p. 22.\n
        636. \n
        637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
        638. \n
        639. ^ Russell & Norvig (2021), p. 24.\n
        640. \n
        641. ^ Nilsson (1998), p. 7.\n
        642. \n
        643. ^ McCorduck (2004), pp. 454–462.\n
        644. \n
        645. ^ Moravec (1988).\n
        646. \n
        647. ^ a b Brooks (1990).\n
        648. \n
        649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
        650. \n
        651. ^ Russell & Norvig (2021), p. 25.\n
        652. \n
        653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
        654. \n
        655. ^ Russell & Norvig (2021), p. 26.\n
        656. \n
        657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
        658. \n
        659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
        660. \n
        661. ^ Wong (2023).\n
        662. \n
        663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
        664. \n
        665. ^ a b c Clark (2015b).\n
        666. \n
        667. ^ Big data: Russell & Norvig (2021, p. 26)\n
        668. \n
        669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
        670. \n
        671. ^ DiFeliciantonio (2023).\n
        672. \n
        673. ^ Goswami (2023).\n
        674. \n
        675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
        676. \n
        677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
        678. \n
        679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
        680. \n
        681. ^ a b Turing (1950), p. 1.\n
        682. \n
        683. ^ Turing (1950), Under "The Argument from Consciousness".\n
        684. \n
        685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
        686. \n
        687. ^ Russell & Norvig (2021), p. 3.\n
        688. \n
        689. ^ Maker (2006).\n
        690. \n
        691. ^ McCarthy (1999).\n
        692. \n
        693. ^ Minsky (1986).\n
        694. \n
        695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
        696. \n
        697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
        698. \n
        699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
        700. \n
        701. ^ Nilsson (1983), p. 10.\n
        702. \n
        703. ^ Haugeland (1985), pp. 112–117.\n
        704. \n
        705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
        706. \n
        707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
        708. \n
        709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
        710. \n
        711. ^ Crevier (1993), p. 125.\n
        712. \n
        713. ^ Langley (2011).\n
        714. \n
        715. ^ Katz (2012).\n
        716. \n
        717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
        718. \n
        719. ^ Pennachin & Goertzel (2007).\n
        720. \n
        721. ^ a b Roberts (2016).\n
        722. \n
        723. ^ Russell & Norvig (2021), p. 986.\n
        724. \n
        725. ^ Chalmers (1995).\n
        726. \n
        727. ^ Dennett (1991).\n
        728. \n
        729. ^ Horst (2005).\n
        730. \n
        731. ^ Searle (1999).\n
        732. \n
        733. ^ Searle (1980), p. 1.\n
        734. \n
        735. ^ Russell & Norvig (2021), p. 9817.\n
        736. \n
        737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
        738. \n
        739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
        740. \n
        741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
        742. \n
        743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
        744. \n
        745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
        746. \n
        747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
        748. \n
        749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
        750. \n
        751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
        752. \n
        753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
        754. \n
        755. ^ Russell & Norvig (2021), p. 1005.\n
        756. \n
        757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
        758. \n
        759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
        760. \n
        761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
        762. \n
        763. ^ McCorduck (2004), pp. 340–400.\n
        764. \n
        765. ^ Buttazzo (2001).\n
        766. \n
        767. ^ Anderson (2008).\n
        768. \n
        769. ^ McCauley (2007).\n
        770. \n
        771. ^ Galvan (1997).\n
        772. \n
        \n

        AI textbooks

        \n

        The two most widely used textbooks in 2023 (see the Open Syllabus):\n

        \n\n

        These were the four of the most widely used AI textbooks in 2008:\n

        \n
        \n\n
        \n

        Other textbooks:\n

        \n\n

        History of AI

        \n
        \n\n
        \n

        Other sources

        \n
        \n\n\n
        \n

        Further reading

        \n
        \n\n
        \n
        \n\n\n\n\n\n\n\n\n\n
        \n
        \n\t\t\t\t\t\n\t\t\t\t
        \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
        \n\t
          \n
          \n\n\n'}'. -[2024-11-03 23:36:52,262] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_3oMrrEdM4incsa48DWVMKUej', type='function')], function_call=None) -[2024-11-03 23:36:52,335] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:36:52,336] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 23:36:58,443] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=latest%20trends%20in%20AI'}'. -[2024-11-03 23:36:58,444] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=latest%20trends%20in%20AI', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-11-03 23:36:58,446] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_uGlejVgiSzmPec07bsHEECm5', type='function')], function_call=None) -[2024-11-03 23:36:58,447] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_wWXjnvnmgMMpCZyq4D6B1S8S', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_3oMrrEdM4incsa48DWVMKUej', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_uGlejVgiSzmPec07bsHEECm5', type='function')], function_call=None) - -[2024-11-03 23:36:58,448] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_wWXjnvnmgMMpCZyq4D6B1S8S', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_3oMrrEdM4incsa48DWVMKUej', type='function')], function_call=None) -- agent4: Message(content=None, 
role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_uGlejVgiSzmPec07bsHEECm5', type='function')], function_call=None) - -[2024-11-03 23:36:58,451] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_wWXjnvnmgMMpCZyq4D6B1S8S', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_3oMrrEdM4incsa48DWVMKUej', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_uGlejVgiSzmPec07bsHEECm5', type='function')], function_call=None) - -[2024-11-03 23:36:58,886] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-03 23:36:58,887] [INFO] [Engine]: Starting iteration 2 -[2024-11-03 23:36:58,888] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_wWXjnvnmgMMpCZyq4D6B1S8S', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_3oMrrEdM4incsa48DWVMKUej', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_uGlejVgiSzmPec07bsHEECm5', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:37:00,078] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please collaborate with agent3, agent4 to gather information on the latest trends in AI from the sources they have accessed (Wikipedia and Twitter). Consolidate the information and provide a summary report.', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'}, 'continue': True} -[2024-11-03 23:37:00,079] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please collaborate with agent3, agent4 to gather information on the latest trends in AI from the sources they have accessed (Wikipedia and Twitter). Consolidate the information and provide a summary report.', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'} -[2024-11-03 23:37:00,081] [INFO] [Engine]: Assigning task to agent1: Please collaborate with agent3, agent4 to gather information on the latest trends in AI from the sources they have accessed (Wikipedia and Twitter). Consolidate the information and provide a summary report. -[2024-11-03 23:37:00,082] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Please collaborate with agent3, agent4 to gather information on the latest trends in AI from the sources they have accessed (Wikipedia and Twitter). Consolidate the information and provide a summary report.'. -[2024-11-03 23:37:05,543] [INFO] [BaseAgent]: Agent agent1 sent message to agent3: Please gather information on the latest trends in AI from Wikipedia. 
-[2024-11-03 23:37:05,544] [INFO] [BaseAgent]: Agent agent3 received message from agent1: Please gather information on the latest trends in AI from Wikipedia. -[2024-11-03 23:41:28,109] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:41:28,111] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-11-03 23:41:28,132] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-11-03 23:41:28,133] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:44:30,290] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:44:30,350] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:44:30,350] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:44:30,352] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:44:30,353] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:44:30,354] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:44:30,355] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:44:30,356] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:44:30,358] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:44:30,360] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-03 23:44:30,360] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:44:30,362] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:44:30,365] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:44:30,367] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:44:30,374] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:44:30,374] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:44:30,375] [INFO] [Engine]: Engine starting simulation. 
-[2024-11-03 23:44:30,376] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:44:30,377] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-11-03 23:44:31,296] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:44:31,297] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:44:31,298] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:44:31,299] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:45:47,223] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-03 23:45:47,224] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-03 23:45:47,234] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-03 23:45:48,912] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-03 23:45:48,914] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-03 23:45:48,916] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-03 23:45:50,844] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-03 23:45:50,846] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-03 23:45:50,847] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-03 23:45:50,848] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-03 23:45:50,849] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-03 23:45:50,853] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-03 23:45:50,868] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-03 23:45:50,869] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-03 23:45:50,870] [INFO] [Engine]: Simulation completed. -[2024-11-03 23:45:56,564] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-03 23:45:56,567] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-03 23:45:56,567] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-03 23:45:56,569] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-03 23:45:56,570] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-03 23:45:56,571] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-03 23:45:56,573] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-03 23:45:56,574] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-03 23:45:56,575] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-03 23:45:56,578] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-03 23:45:56,578] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-03 23:45:56,579] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-03 23:45:56,581] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-03 23:45:56,582] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-03 23:45:56,585] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-03 23:45:56,585] [INFO] [Engine]: Engine initialized. -[2024-11-03 23:45:56,587] [INFO] [Engine]: Engine starting simulation. -[2024-11-03 23:45:56,588] [INFO] [Engine]: Starting iteration 1 -[2024-11-03 23:45:56,589] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-03 23:45:57,628] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-03 23:45:57,629] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-03 23:45:57,630] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-03 23:45:57,631] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-03 23:46:33,005] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-03 23:46:33,006] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-03 23:46:39,076] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: The task is: Search for the latest trends in AI using Google. -[2024-11-03 23:46:39,078] [INFO] [BaseAgent]: Agent agent2 received message from agent1: The task is: Search for the latest trends in AI using Google. -[2024-11-03 23:46:44,754] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? 
-[2024-11-03 23:46:44,756] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-04 00:12:51,885] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-04 00:12:51,886] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-04 00:12:51,929] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-04 00:12:54,264] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-04 00:12:54,265] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-04 00:12:54,266] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-04 00:12:55,828] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-04 00:12:55,830] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-04 00:12:55,831] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-04 00:12:55,832] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-04 00:12:55,833] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-04 00:12:55,840] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-04 00:12:55,841] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-04 00:12:55,842] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-04 00:12:55,843] [INFO] [Engine]: Simulation completed. -[2024-11-04 00:13:23,581] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-04 00:13:23,630] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-04 00:13:23,630] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-04 00:13:23,631] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-04 00:13:23,632] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-04 00:13:23,633] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-04 00:13:23,634] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-04 00:13:23,635] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-04 00:13:23,636] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-04 00:13:23,639] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-04 00:13:23,639] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-04 00:13:23,640] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-04 00:13:23,641] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-04 00:13:23,642] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-04 00:13:23,682] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-04 00:13:23,682] [INFO] [Engine]: Engine initialized. -[2024-11-04 00:13:23,683] [INFO] [Engine]: Engine starting simulation. -[2024-11-04 00:13:23,684] [INFO] [Engine]: Starting iteration 1 -[2024-11-04 00:13:23,685] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-04 00:13:24,928] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'}, 'continue': True} -[2024-11-04 00:13:24,929] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'} -[2024-11-04 00:13:24,969] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using google. -[2024-11-04 00:13:24,971] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using google.'. -[2024-11-04 00:14:05,671] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-11-04 00:14:05,678] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

          Accessibility Links

          About 541,000,000 results (0.60 seconds) 

          Search Results

          AI Overview

          AI Overview
          Here are some of the latest trends in artificial intelligence (AI):
          • Increased regulation
            As AI becomes more widespread, there will be more regulation to ensure AI is used ethically and responsibly. 
          • Agentic AI
            Autonomous systems are being used to make decisions for users, enhancing output and decision-making. 
          • Predictive analytics
            AI and machine learning techniques are being used to improve accuracy and efficiency in business decision-making. 
          • Top AI Trends 2024: Key Developments to Watch
            Oct 7, 2024 — Agentic AI. One of the most notable new developments in the AI field is the use of autonomous systems to make decisions...
            Appinventiv
          • Unveiling the Top Artificial Intelligence Trends for 2024
            Apr 5, 2024 — Workplace AI. The final list in the new AI trend in 2024 and beyond will be the workplace AI wherein the technology is ...
            Prismetric
          • 6 Trends Driving the AI Everywhere Boom
            Feb 24, 2023 — Increased Regulation: As AI becomes more widespread, we can expect to see increased regulation of this technology. Thi...
            Intel Community
          • Show all
          Show more

          Ads

          Sponsored
          GenAI and Privacy Concerns — Get answers to CX professionals’ top questions on Generative AI. Read the Forrester report. Boost GenAI knowledge. Read Forrester’s report, Generative AI Essentials for CX Leaders. View Products. Check Services. Browse Resources.
          Sponsored
          Navigate the Contract Landscape: Discover our curated selection of Events & Webinars. For Legal & Procurement, insights and solutions to optimize The Full Contract Lifecycle.
          People also ask
          Feedback
          People also search for
          Feedback

          Page Navigation

          Google apps
          '}'. -[2024-11-04 00:14:05,723] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_dqATcsluqCZEgeiCXD0lDGCu', type='function')], function_call=None) -[2024-11-04 00:14:05,787] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using wikipedia. -[2024-11-04 00:14:05,789] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using wikipedia.'. -[2024-11-04 00:14:08,165] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-04 00:14:08,174] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
          \n\t
          \n\t\t
          \n\t\t\t
          \n\n\t\t\n\t\t\t\n\n\n\t\t
          \n\t\t
          \n\t\t\t\n\n\n\t\t\t\n\n\t\t
          \n\t\n\n
          \n\t
          \n\t\t
          \n\t\t\t
          \n\t\t
          \n\t\t
          \n\t\t\t
          \n\t\t
          \n\t\t\t\n\t\t
          \n\t
          \n\t
          \n\t\t\t\t
          \n\t\t\n\t\t\t
          \n\t\t
          \n\t\t
          \n\t\t\t
          \n\t\t\t\t
          \n\t\t\t\t\t\n\t\t\t\t\t

          Artificial intelligence

          \n\t\t\t\t\t\t\t\n
          \n\t\n\t\n\t
          \n\n\t\t
          \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
          \n\n\t
          \n
          \n
          \n\t\t\t\t
          \n\t\t\t\t\t
          \n\t\t\t\t\t\t
          \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
          \n\t\t\t\t\t\t
          \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
          \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
          \n\t\t\t\t\t
          \n\t\t\t\t
          \n\t\t\t\t
          \n\t\t\t\t\t
          \n\t\t\t\t\t\t\n\t\t\t\t\t\t
          \n\t\t\n\t\t\t\t\t
          \n\t\t\t\t
          \n\t\t\t\t
          \n\t\t\t\t\t
          \n\t\t\t\t\t\t\t
          \n\t\t
          Page semi-protected
          \n\t\t
          \n\n\t\t\t\t\t\t
          From Wikipedia, the free encyclopedia
          \n\t\t\t\t\t
          \n\t\t\t\t\t
          \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
          \n\n

          \n

          \n\n\n\n\n\n\n\n

          Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

          Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

          The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

          Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

          \n\n

          Goals

          \n

          The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

          \n

          Reasoning and problem-solving

          \n

          Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

          Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

          \n

          Knowledge representation

          \n
          An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
          \n

          Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

          A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

          Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

          \n

          Planning and decision-making

          \n

          An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

          In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

          In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

          A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

          Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

          \n

          Learning

          \n

          Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

          There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

          In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

          Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

          \n
          \n

          Natural language processing

          \n

          Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

          Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

          Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

          \n

          Perception

          \n

          Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

          The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

          \n

          Social intelligence

          \n
          Kismet, a robot head which was made in the 1990s; it is a machine that can recognize and simulate emotions.[64]
          \n

          Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

          However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

          \n

          General intelligence

          \n

          A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

          \n

          Techniques

          \n

          AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

          \n

          Search and optimization

          \n

          AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

          \n
          \n

          State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

          Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

          Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

          \n
          \n
          Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

          Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

          Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

          Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

          Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

          \n

          Logic

          \n

          Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

          Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

          Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

          Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

          Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

          Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

          \n

          Probabilistic methods for uncertain reasoning

          \n
          A simple Bayesian network, with the associated conditional probability tables
          \n

          Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

          Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

          Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

          \n
          Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
          \n

          Classifiers and statistical learning methods

          \n

          The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

          There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

          \n

          Artificial neural networks

          \n
          A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
          \n

          An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

          Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

          In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

          \n
          \n

          Deep learning

          \n
          \n

          Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

          Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

          \n

          GPT

          \n

          Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

          Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

          \n

          Hardware and software

          \n\n

          In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

          The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

          \n

          Applications

          \n

          AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

          Health and medicine

          \n\n

          The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

          For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

          \n

          Games

          \n\n

          Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

          \n

          Mathematics

          \n

          In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

          Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

          When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

          Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

          \n

          Finance

          \n

          Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

          World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

          \n

          Military

          \n\n

          Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

          In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

          \n

          Generative AI

          \n\n
          Vincent van Gogh in watercolour created by generative AI software
          \n

          In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

          In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

          \n

          Agents

          \n

          Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

          \n

          Other industry-specific tasks

          \n

          There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

          AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

          In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

          Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

          During the 2024 Indian elections, US$50 million was spent on authorized AI-generated content, notably by creating deepfakes of allied (including sometimes deceased) politicians to better engage with voters, and by translating speeches to various local languages.[172] \n

          \n

          Ethics

          \n\n

          AI has potential benefits and potential risks.[173] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[174] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[175] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[176]\n

          \n

          Risks and harm

          \n
          \n\n

          Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

          AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

          Sensitive user data collected may include online activity records, geolocation data, video or audio.[177] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[178] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[179]\n

          AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[180] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[181]\n

          Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[182][183] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[184] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[185][186] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[187]\n

          \n

          Dominance by tech giants

          \n

          The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[188][189][190] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[191][192]\n

          \n

          Substantial power needs and other environmental impacts

          \n\n

          In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[193] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[194]\n

          Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[195]\n

          A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[196] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[197]\n

          In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[198]\n

          In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[199] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[200]\n

          \n

          Misinformation

          \n\n

          YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[201] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[202] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

          In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[203] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[204]\n

          \n

          Algorithmic bias and fairness

          \n\n

          Machine learning applications will be biased[k] if they learn from biased data.[206] The developers may not be aware that the bias exists.[207] Bias can be introduced by the way training data is selected and by the way a model is deployed.[208][206] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[209] The field of fairness studies how to prevent harms from algorithmic biases.\n

          On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[210] a problem called "sample size disparity".[211] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[212]\n

          COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would re-offend.[213] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[215]\n

          A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[216] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[217]\n

          Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[218] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

          Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[211]\n

          There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[205]\n

          At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][220]\n

          \n

          Lack of transparency

          \n\n

          Many AI systems are so complex that their designers cannot explain how they reach their decisions.[221] This is particularly the case with deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. Nevertheless, some popular explainability techniques exist.[222]\n

          It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[223] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[224]\n

          People who have been harmed by an algorithm\'s decision have a right to an explanation.[225] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[226]\n

          DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[227]\n

          Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[228] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[229] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[230] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[231] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[232]\n

          \n

          Bad actors and weaponized AI

          \n\n

          Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

          A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[234] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[234] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[235] By 2015, over fifty countries were reported to be researching battlefield robots.[236]\n

          AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[237] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[238][239]\n

          There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[240]\n

          \n

          Technological unemployment

          \n\n

          Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[241]\n

          In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[242] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[243] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][245] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[241] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[246][247]\n

          Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[248] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[249]\n

          From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[250]\n

          \n

          Existential risk

          \n\n

          It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[251] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

          First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[253] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[254] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[255]\n

          Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[256]\n

          The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[257] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[258] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

          In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[259] He notably mentioned risks of an AI takeover,[260] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[261]\n

          In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[262]\n

          Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[263] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[264][265] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[266] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[267] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[268] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[269]\n

          \n

          Ethical machines and alignment

          \n\n

          Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[270]\n

          Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[271]\nThe field of machine ethics is also called computational morality,[271]\nand was founded at an AAAI symposium in 2005.[272]\n

          Other approaches include Wendell Wallach\'s "artificial moral agents"[273] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[274]\n

          \n

          Open source

          \n

          Active organizations in the AI open-source community include Hugging Face,[275] Google,[276] EleutherAI and Meta.[277] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[278][279] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[280] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[281]\n

          \n

          Frameworks

          \n

          Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework, containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[282][283]\n

          \n
          • Respect the dignity of individual people
          • \n
          • Connect with other people sincerely, openly, and inclusively
          • \n
          • Care for the wellbeing of everyone
          • \n
          • Protect social values, justice, and the public interest
          \n

          Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[284] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[285]\n

          Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[286]\n

          The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[287]\n

          \n

          Regulation

          \n\n
          AI Safety Summit
          The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
          \n

          The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[288] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[289] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[290][291] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[292] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[292] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[292] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[293] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[294] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[295] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[296]\n

          In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[290] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[297] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[298][299]\n

          In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[300] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[301][302] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[303][304]\n

          \n

          History

          \n\n\n

          The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[305][306] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[308] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[309][306]\n

          The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[306]\n

          Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[313] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[314] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[315] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[317] and ongoing pressure from the U.S. Congress to fund more productive projects.[318] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[319] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

          In the early 1980s, AI research was revived by the commercial success of expert systems,[320] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

          Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[321] and began to look into "sub-symbolic" approaches.[322] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][327] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[328] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[329]\n

          AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[330] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[331]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

          Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[333] graphics processing units, cloud computing[334]) and access to large amounts of data[335] (including curated datasets,[334] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[292]\n

          In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[269]\n

          In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[336] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[337] About 800,000 "AI"-related U.S. job openings existed in 2022.[338]\n

          \n

          Philosophy

          \n\n

          Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[339] Another major focus has been whether machines can be conscious, and the associated ethical implications.[340] Many other topics in philosophy are relevant to AI, such as epistemology and free will.[341] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[340]\n

          \n

          Defining artificial intelligence

          \n\n

          Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[342] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[342] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[309] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[343]\n

          \n
          The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[344]
          \n

          Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[345] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[346]\n

          McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[347] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[348] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

          Another definition has been adopted by Google,[349] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

          Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[350] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[351]\n

          \n

          Evaluating approaches to AI

          \n

          No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

          \n

          Symbolic AI and its limits

          \n

          Symbolic AI (or "GOFAI")[353] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[354]\n

          However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[355] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[356] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

          The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[358][359] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

          \n

          Neat vs. scruffy

          \n\n

          "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[360] but eventually was seen as irrelevant. Modern AI has elements of both.\n

          \n

          Soft vs. hard computing

          \n\n

          Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

          \n

          Narrow vs. general AI

          \n\n

          AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[361][362] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

          \n

          Machine consciousness, sentience, and mind

          \n\n

          The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[363] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

          \n

          Consciousness

          \n\n

          David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[364] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[365]\n

          \n

          Computationalism and functionalism

          \n\n

          Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[366]\n

          Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[370]\n

          \n

          AI welfare and rights

          \n

          It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[371] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[372][373] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[372] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[374]\n

          In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[375] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[376][377]\n

          Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[373][372]\n

          \n

          Future

          \n

          Superintelligence and the singularity

          \n

          A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[362] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[378]\n

          However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[379]\n

          \n

          Transhumanism

          \n\n

          Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[380]\n

          Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[381]\n

          \n

          In fiction

          \n\n
          The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
          \n

          Thought-capable artificial beings have appeared as storytelling devices since antiquity,[382] and have been a persistent theme in science fiction.[383]\n

          A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[384]\n

          Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[385] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[386]\n

          Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[387]\n

          \n

          See also

          \n\n

          Explanatory notes

          \n
          \n
            \n
          1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
          2. \n
          3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
          4. \n
          5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
          6. \n
          7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
          8. \n
          9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
          10. \n
          11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
          12. \n
          13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
          14. \n
          15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
          16. \n
          17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
          18. \n
          19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
          20. \n
          21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[205]\n
          22. \n
          23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[214]\n
          24. \n
          25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[219]\n
          26. \n
          27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
          28. \n
          29. ^ This is the United Nations\' definition, and includes things like land mines as well.[233]\n
          30. \n
          31. ^ See table 4; 9% is both the OECD average and the U.S. average.[244]\n
          32. \n
          33. ^ Sometimes called a "robopocalypse"[252]\n
          34. \n
          35. ^ "Electronic brain" was the term used by the press around this time.[305][307]\n
          36. \n
          37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[310] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
          38. \n
          39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[311]\n
          40. \n
          41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[312]\n
          42. \n
          43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
          44. \n
          45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[316]\n
          46. \n
          47. ^ \nEmbodied approaches to AI[323] were championed by Hans Moravec[324] and Rodney Brooks[325] and went by many names: Nouvelle AI.[325] Developmental robotics.[326]\n
          48. \n
          49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[332]\n
          50. \n
          51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[334]\n
          52. \n
          53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[352]\n
          54. \n
          55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[357]\n
          56. \n
          57. ^ \nSearle presented this definition of "Strong AI" in 1999.[367] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[368] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[369]\n
          58. \n
          \n

          References

          \n
          \n
            \n
          1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
          2. \n
          3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
          4. \n
          5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
          6. \n
          7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
            Proposal for the modern version: Pennachin & Goertzel (2007)
            Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
            \n
          8. \n
          9. ^ Russell & Norvig (2021, §1.2).\n
          10. \n
          11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
            The proposal: McCarthy et al. (1955)
            \n
          12. \n
          13. ^ a b Successful programs of the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
          14. \n
          15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
          16. \n
          17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
          18. \n
          19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
          20. \n
          21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
          22. \n
          23. ^ Toews (2023).\n
          24. \n
          25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
          26. \n
          27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
          28. \n
          29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
          30. \n
          31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
          32. \n
          33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
          34. \n
          35. ^ Smoliar & Zhang (1994).\n
          36. \n
          37. ^ Neumann & Möller (2008).\n
          38. \n
          39. ^ Kuperman, Reichley & Bailey (2006).\n
          40. \n
          41. ^ McGarry (2005).\n
          42. \n
          43. ^ Bertini, Del Bimbo & Torniai (2006).\n
          44. \n
          45. ^ Russell & Norvig (2021), pp. 272.\n
          46. \n
          47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
          48. \n
          49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
          50. \n
          51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
          52. \n
          53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
          54. \n
          55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
          56. \n
          57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
          58. \n
          59. ^ Newquist (1994), p. 296.\n
          60. \n
          61. ^ Crevier (1993), pp. 204–208.\n
          62. \n
          63. ^ Russell & Norvig (2021), p. 528.\n
          64. \n
          65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
          66. \n
          67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
          68. \n
          69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
          70. \n
          71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
          72. \n
          73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
          74. \n
          75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
          76. \n
          77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
          78. \n
          79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
          80. \n
          81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
          82. \n
          83. ^ Turing (1950).\n
          84. \n
          85. ^ Solomonoff (1956).\n
          86. \n
          87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
          88. \n
          89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
          90. \n
          91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
          92. \n
          93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
          94. \n
          95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
          96. \n
          97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
          98. \n
          99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
          100. \n
          101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
          102. \n
          103. ^ Russell & Norvig (2021), pp. 856–858.\n
          104. \n
          105. ^ Dickson (2022).\n
          106. \n
          107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
          108. \n
          109. ^ Vincent (2019).\n
          110. \n
          111. ^ Russell & Norvig (2021), pp. 875–878.\n
          112. \n
          113. ^ Bushwick (2023).\n
          114. \n
          115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
          116. \n
          117. ^ Russell & Norvig (2021), pp. 849–850.\n
          118. \n
          119. ^ Russell & Norvig (2021), pp. 895–899.\n
          120. \n
          121. ^ Russell & Norvig (2021), pp. 899–901.\n
          122. \n
          123. ^ Challa et al. (2011).\n
          124. \n
          125. ^ Russell & Norvig (2021), pp. 931–938.\n
          126. \n
          127. ^ MIT AIL (2014).\n
          128. \n
          129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
          130. \n
          131. ^ Waddell (2018).\n
          132. \n
          133. ^ Poria et al. (2017).\n
          134. \n
          135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
          136. \n
          137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
          138. \n
          139. ^ Russell & Norvig (2021), sect. 11.2.\n
          140. \n
          141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
          142. \n
          143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
          144. \n
          145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
          146. \n
          147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
          148. \n
          149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
          150. \n
          151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
          152. \n
          153. ^ Merkle & Middendorf (2013).\n
          154. \n
          155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
          156. \n
          157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
          158. \n
          159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
          160. \n
          161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
          162. \n
          163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
          164. \n
          165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
          166. \n
          167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
          168. \n
          169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
          170. \n
          171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
          172. \n
          173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
          174. \n
          175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
          176. \n
          177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
          178. \n
          179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
          180. \n
          181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
          182. \n
          183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
          184. \n
          185. ^ Domingos (2015), chpt. 6.\n
          186. \n
          187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
          188. \n
          189. ^ Domingos (2015), p. 210.\n
          190. \n
          191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
          192. \n
          193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
          194. \n
          195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
          196. \n
          197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
          198. \n
          199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
          200. \n
          201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
          202. \n
          203. ^ Domingos (2015), p. 152.\n
          204. \n
          205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
          206. \n
          207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
          208. \n
          209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
          210. \n
          211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
          212. \n
          213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
          214. \n
          215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
          216. \n
          217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
          218. \n
          219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
          220. \n
          221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
          222. \n
          223. ^ Deng & Yu (2014), pp. 199–200.\n
          224. \n
          225. ^ Ciresan, Meier & Schmidhuber (2012).\n
          226. \n
          227. ^ Russell & Norvig (2021), p. 751.\n
          228. \n
          229. ^ a b c Russell & Norvig (2021), p. 17.\n
          230. \n
          231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
          232. \n
          233. ^ a b Schmidhuber (2022), sect. 5.\n
          234. \n
          235. ^ Schmidhuber (2022), sect. 6.\n
          236. \n
          237. ^ a b c Schmidhuber (2022), sect. 7.\n
          238. \n
          239. ^ Schmidhuber (2022), sect. 8.\n
          240. \n
          241. ^ Quoted in Christian (2020, p. 22)\n
          242. \n
          243. ^ Smith (2023).\n
          244. \n
          245. ^ "Explained: Generative AI". 9 November 2023.\n
          246. \n
          247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
          248. \n
          249. ^ Marmouyet (2023).\n
          250. \n
          251. ^ Kobielus (2019).\n
          252. \n
          253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
          254. \n
          255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
          256. \n
          257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
          258. \n
          259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
          260. \n
          261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
          262. \n
          263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
          264. \n
          265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
          266. \n
          267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
          268. \n
          269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
          270. \n
          271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          272. \n
          273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
          274. \n
          275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
          276. \n
          277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
          278. \n
          279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
          280. \n
          281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
          282. \n
          283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
          284. \n
          285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
          286. \n
          287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
          288. \n
          289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
          290. \n
          291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
          292. \n
          293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
          294. \n
          295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
          296. \n
          297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
          298. \n
          299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
          300. \n
          301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
          302. \n
          303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
          304. \n
          305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
          306. \n
          307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
          308. \n
          309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
          310. \n
          311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
          312. \n
          313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
          314. \n
          315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
          316. \n
          317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
          318. \n
          319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
          320. \n
          321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
          322. \n
          323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
          324. \n
          325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
          326. \n
          327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
          328. \n
          329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          330. \n
          331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
          332. \n
          333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
          334. \n
          335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
          336. \n
          337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
          338. \n
          339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          340. \n
          341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
          342. \n
          343. ^ "India\'s latest election embraced AI technology. Here are some ways it was used constructively". PBS News. 12 June 2024. Retrieved 28 October 2024.\n
          344. \n
          345. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          346. \n
          347. ^ Simonite (2016).\n
          348. \n
          349. ^ Russell & Norvig (2021), p. 987.\n
          350. \n
          351. ^ Laskowski (2023).\n
          352. \n
          353. ^ GAO (2022).\n
          354. \n
          355. ^ Valinsky (2019).\n
          356. \n
          357. ^ Russell & Norvig (2021), p. 991.\n
          358. \n
          359. ^ Russell & Norvig (2021), pp. 991–992.\n
          360. \n
          361. ^ Christian (2020), p. 63.\n
          362. \n
          363. ^ Vincent (2022).\n
          364. \n
          365. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
          366. \n
          367. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
          368. \n
          369. ^ Reisner (2023).\n
          370. \n
          371. ^ Alter & Harris (2023).\n
          372. \n
          373. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
          374. \n
          375. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
          376. \n
          377. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
          378. \n
          379. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
          380. \n
          381. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
          382. \n
          383. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
          384. \n
          385. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
          386. \n
          387. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
          388. \n
          389. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
          390. \n
          391. ^ Davenport, Carly. "AI Data Centers and the Coming YS Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
          392. \n
          393. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
          394. \n
          395. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          396. \n
          397. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
          398. \n
          399. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          400. \n
          401. ^ Nicas (2018).\n
          402. \n
          403. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
          404. \n
          405. ^ Williams (2023).\n
          406. \n
          407. ^ Taylor & Hern (2023).\n
          408. \n
          409. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
          410. \n
          411. ^ a b Rose (2023).\n
          412. \n
          413. ^ CNA (2019).\n
          414. \n
          415. ^ Goffrey (2008), p. 17.\n
          416. \n
          417. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
          418. \n
          419. ^ Christian (2020), p. 25.\n
          420. \n
          421. ^ a b Russell & Norvig (2021), p. 995.\n
          422. \n
          423. ^ Grant & Hill (2023).\n
          424. \n
          425. ^ Larson & Angwin (2016).\n
          426. \n
          427. ^ Christian (2020), p. 67–70.\n
          428. \n
          429. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
          430. \n
          431. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
          432. \n
          433. ^ Quoted in Christian (2020, p. 65).\n
          434. \n
          435. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
          436. \n
          437. ^ Quoted in Christian (2020, p. 80)\n
          438. \n
          439. ^ Dockrill (2022).\n
          440. \n
          441. ^ Sample (2017).\n
          442. \n
          443. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
          444. \n
          445. ^ Christian (2020), p. 110.\n
          446. \n
          447. ^ Christian (2020), pp. 88–91.\n
          448. \n
          449. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
          450. \n
          451. ^ Christian (2020), p. 91.\n
          452. \n
          453. ^ Christian (2020), p. 83.\n
          454. \n
          455. ^ Verma (2021).\n
          456. \n
          457. ^ Rothman (2020).\n
          458. \n
          459. ^ Christian (2020), pp. 105–108.\n
          460. \n
          461. ^ Christian (2020), pp. 108–112.\n
          462. \n
          463. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
          464. \n
          465. ^ Russell & Norvig (2021), p. 989.\n
          466. \n
          467. ^ a b Russell & Norvig (2021), pp. 987–990.\n
          468. \n
          469. ^ Russell & Norvig (2021), p. 988.\n
          470. \n
          471. ^ Robitzski (2018); Sainato (2015)\n
          472. \n
          473. ^ Harari (2018).\n
          474. \n
          475. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
          476. \n
          477. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
          478. \n
          479. ^ Urbina et al. (2022).\n
          480. \n
          481. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
          482. \n
          483. ^ Ford & Colvin (2015);McGaughey (2022)\n
          484. \n
          485. ^ IGM Chicago (2017).\n
          486. \n
          487. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
          488. \n
          489. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
          490. \n
          491. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
          492. \n
          493. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
          494. \n
          495. ^ Morgenstern (2015).\n
          496. \n
          497. ^ Mahdawi (2017); Thompson (2014)\n
          498. \n
          499. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
          500. \n
          501. ^ Cellan-Jones (2014).\n
          502. \n
          503. ^ Russell & Norvig 2021, p. 1001.\n
          504. \n
          505. ^ Bostrom (2014).\n
          506. \n
          507. ^ Russell (2019).\n
          508. \n
          509. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
          510. \n
          511. ^ Harari (2023).\n
          512. \n
          513. ^ Müller & Bostrom (2014).\n
          514. \n
          515. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
          516. \n
          517. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
          518. \n
          519. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
          520. \n
          521. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
          522. \n
          523. ^ Valance (2023).\n
          524. \n
          525. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
          526. \n
          527. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
          528. \n
          529. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
          530. \n
          531. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
          532. \n
          533. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
          534. \n
          535. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
          536. \n
          537. ^ a b Christian (2020), pp. 67, 73.\n
          538. \n
          539. ^ Yudkowsky (2008).\n
          540. \n
          541. ^ a b Anderson & Anderson (2011).\n
          542. \n
          543. ^ AAAI (2014).\n
          544. \n
          545. ^ Wallach (2010).\n
          546. \n
          547. ^ Russell (2019), p. 173.\n
          548. \n
          549. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
          550. \n
          551. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
          552. \n
          553. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
          554. \n
          555. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
          556. \n
          557. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
          558. \n
          559. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
          560. \n
          561. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
          562. \n
          563. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
          564. \n
          565. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
          566. \n
          567. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
          568. \n
          569. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          570. \n
          571. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          572. \n
          573. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
          574. \n
          575. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
          576. \n\n
          577. ^ a b Vincent (2023).\n
          578. \n
          579. ^ Stanford University (2023).\n
          580. \n
          581. ^ a b c d UNESCO (2021).\n
          582. \n
          583. ^ Kissinger (2021).\n
          584. \n
          585. ^ Altman, Brockman & Sutskever (2023).\n
          586. \n
          587. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
          588. \n
          589. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
          590. \n
          591. ^ Edwards (2023).\n
          592. \n
          593. ^ Kasperowicz (2023).\n
          594. \n
          595. ^ Fox News (2023).\n
          596. \n
          597. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
          598. \n
          599. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
          600. \n
          601. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
          602. \n
          603. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
          604. \n
          605. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
          606. \n
          607. ^ a b Russell & Norvig 2021, p. 9.\n
          608. \n
          609. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
          610. \n
          611. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
          612. \n
          613. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
          614. \n
          615. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
          616. \n
          617. ^ Crevier (1993), pp. 47–49.\n
          618. \n
          619. ^ Russell & Norvig (2003), p. 17.\n
          620. \n
          621. ^ Russell & Norvig (2003), p. 18.\n
          622. \n
          623. ^ Newquist (1994), pp. 86–86.\n
          624. \n
          625. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
          626. \n
          627. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
          628. \n
          629. ^ Russell & Norvig (2021), p. 21.\n
          630. \n
          631. ^ Lighthill (1973).\n
          632. \n
          633. ^ NRC 1999, pp. 212–213.\n
          634. \n
          635. ^ Russell & Norvig (2021), p. 22.\n
          636. \n
          637. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
          638. \n
          639. ^ Russell & Norvig (2021), p. 24.\n
          640. \n
          641. ^ Nilsson (1998), p. 7.\n
          642. \n
          643. ^ McCorduck (2004), pp. 454–462.\n
          644. \n
          645. ^ Moravec (1988).\n
          646. \n
          647. ^ a b Brooks (1990).\n
          648. \n
          649. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
          650. \n
          651. ^ Russell & Norvig (2021), p. 25.\n
          652. \n
          653. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
          654. \n
          655. ^ Russell & Norvig (2021), p. 26.\n
          656. \n
          657. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
          658. \n
          659. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
          660. \n
          661. ^ Wong (2023).\n
          662. \n
          663. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
          664. \n
          665. ^ a b c Clark (2015b).\n
          666. \n
          667. ^ Big data: Russell & Norvig (2021, p. 26)\n
          668. \n
          669. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
          670. \n
          671. ^ DiFeliciantonio (2023).\n
          672. \n
          673. ^ Goswami (2023).\n
          674. \n
          675. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
          676. \n
          677. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
          678. \n
          679. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
          680. \n
          681. ^ a b Turing (1950), p. 1.\n
          682. \n
          683. ^ Turing (1950), Under "The Argument from Consciousness".\n
          684. \n
          685. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
          686. \n
          687. ^ Russell & Norvig (2021), p. 3.\n
          688. \n
          689. ^ Maker (2006).\n
          690. \n
          691. ^ McCarthy (1999).\n
          692. \n
          693. ^ Minsky (1986).\n
          694. \n
          695. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
          696. \n
          697. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
          698. \n
          699. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
          700. \n
          701. ^ Nilsson (1983), p. 10.\n
          702. \n
          703. ^ Haugeland (1985), pp. 112–117.\n
          704. \n
          705. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
          706. \n
          707. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
          708. \n
          709. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
          710. \n
          711. ^ Crevier (1993), p. 125.\n
          712. \n
          713. ^ Langley (2011).\n
          714. \n
          715. ^ Katz (2012).\n
          716. \n
          717. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
          718. \n
          719. ^ Pennachin & Goertzel (2007).\n
          720. \n
          721. ^ a b Roberts (2016).\n
          722. \n
          723. ^ Russell & Norvig (2021), p. 986.\n
          724. \n
          725. ^ Chalmers (1995).\n
          726. \n
          727. ^ Dennett (1991).\n
          728. \n
          729. ^ Horst (2005).\n
          730. \n
          731. ^ Searle (1999).\n
          732. \n
          733. ^ Searle (1980), p. 1.\n
          734. \n
          735. ^ Russell & Norvig (2021), p. 9817.\n
          736. \n
          737. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
          738. \n
          739. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
          740. \n
          741. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
          742. \n
          743. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
          744. \n
          745. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
          746. \n
          747. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
          748. \n
          749. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
          750. \n
          751. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
          752. \n
          753. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
          754. \n
          755. ^ Russell & Norvig (2021), p. 1005.\n
          756. \n
          757. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
          758. \n
          759. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
          760. \n
          761. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
          762. \n
          763. ^ McCorduck (2004), pp. 340–400.\n
          764. \n
          765. ^ Buttazzo (2001).\n
          766. \n
          767. ^ Anderson (2008).\n
          768. \n
          769. ^ McCauley (2007).\n
          770. \n
          771. ^ Galvan (1997).\n
          772. \n
          \n

          AI textbooks

          \n

          The two most widely used textbooks in 2023 (see the Open Syllabus):\n

          \n\n

          These were the four of the most widely used AI textbooks in 2008:\n

          \n
          \n\n
          \n

          Other textbooks:\n

          \n\n

          History of AI

          \n
          \n\n
          \n

          Other sources

          \n
          \n\n\n
          \n

          Further reading

          \n
          \n\n
          \n
          \n\n\n\n\n\n\n\n\n\n
          \n
          \n\t\t\t\t\t\n\t\t\t\t
          \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
          \n\t
            \n
            \n\n\n'}'. -[2024-11-04 00:14:08,202] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DeWdaX3vbF3UvAgCrDCv2xDu', type='function')], function_call=None) -[2024-11-04 00:14:08,382] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using twitter. -[2024-11-04 00:14:08,384] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using twitter.'. -[2024-11-04 00:14:10,168] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query'}'. -[2024-11-04 00:14:10,169] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-11-04 00:14:10,170] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_EBGqs2h2XpNv2kSHgyTD8vJo', type='function')], function_call=None) -[2024-11-04 00:14:10,171] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_dqATcsluqCZEgeiCXD0lDGCu', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DeWdaX3vbF3UvAgCrDCv2xDu', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_EBGqs2h2XpNv2kSHgyTD8vJo', type='function')], function_call=None) - -[2024-11-04 00:14:10,172] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_dqATcsluqCZEgeiCXD0lDGCu', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DeWdaX3vbF3UvAgCrDCv2xDu', type='function')], function_call=None) -- agent4: 
Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_EBGqs2h2XpNv2kSHgyTD8vJo', type='function')], function_call=None) - -[2024-11-04 00:14:10,173] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_dqATcsluqCZEgeiCXD0lDGCu', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DeWdaX3vbF3UvAgCrDCv2xDu', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_EBGqs2h2XpNv2kSHgyTD8vJo', type='function')], function_call=None) - -[2024-11-04 00:14:10,614] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-11-04 00:14:10,616] [INFO] [Engine]: Starting iteration 2 -[2024-11-04 00:14:10,617] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_dqATcsluqCZEgeiCXD0lDGCu', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DeWdaX3vbF3UvAgCrDCv2xDu', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_EBGqs2h2XpNv2kSHgyTD8vJo', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-04 00:14:12,068] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please gather the information obtained by agents 2, 3, and 4 about the latest trends in AI and summarize the key points.', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'}, 'continue': True} -[2024-11-04 00:14:12,069] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please gather the information obtained by agents 2, 3, and 4 about the latest trends in AI and summarize the key points.', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'} -[2024-11-04 00:14:12,071] [INFO] [Engine]: Assigning task to agent1: Please gather the information obtained by agents 2, 3, and 4 about the latest trends in AI and summarize the key points. -[2024-11-04 00:14:12,072] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Please gather the information obtained by agents 2, 3, and 4 about the latest trends in AI and summarize the key points.'. -[2024-11-04 00:14:15,032] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Please search for the latest trends in AI and share the information. -[2024-11-04 00:14:15,033] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Please search for the latest trends in AI and share the information. 
-[2024-11-04 00:14:57,282] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-04 00:14:57,283] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-11-04 00:14:57,333] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-11-04 00:14:57,334] [INFO] [Engine]: Simulation completed. -[2024-11-04 00:18:32,302] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-04 00:18:32,328] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-04 00:18:32,328] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-04 00:18:32,329] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-04 00:18:32,330] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-04 00:18:32,331] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-04 00:18:32,332] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-04 00:18:32,333] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-04 00:18:32,334] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-04 00:18:32,337] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-11-04 00:18:32,337] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-04 00:18:32,338] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-04 00:18:32,339] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-04 00:18:32,340] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-04 00:18:32,342] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-04 00:18:32,343] [INFO] [Engine]: Engine initialized. -[2024-11-04 00:18:32,344] [INFO] [Engine]: Engine starting simulation. 
-[2024-11-04 00:18:32,345] [INFO] [Engine]: Starting iteration 1 -[2024-11-04 00:18:32,346] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-11-04 00:18:33,415] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-04 00:18:33,416] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-04 00:18:33,417] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-04 00:18:33,418] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-04 00:18:55,391] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-11-04 00:18:55,392] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-04 00:18:55,393] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-04 00:18:56,820] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-11-04 00:18:56,822] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-11-04 00:18:56,823] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-11-04 00:18:58,031] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-11-04 00:18:58,066] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-11-04 00:18:58,067] [INFO] [Engine]: Agents' Results Summary: - -[2024-11-04 00:18:58,068] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-11-04 00:18:58,070] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/zhong42/marble/MARBLE/marble/engine/engine.py", line 147, in start - self.evaluator.update(self.environment, self.agents) - File "/home/zhong42/marble/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/zhong42/marble/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-11-04 00:18:58,073] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-04 00:18:58,074] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-11-04 00:18:58,075] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-11-04 00:18:58,076] [INFO] [Engine]: Simulation completed. -[2024-11-04 00:19:46,135] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-11-04 00:19:46,154] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-11-04 00:19:46,154] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-11-04 00:19:46,155] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-11-04 00:19:46,156] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-11-04 00:19:46,157] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-11-04 00:19:46,158] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-11-04 00:19:46,159] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-11-04 00:19:46,160] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-11-04 00:19:46,164] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-11-04 00:19:46,164] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-11-04 00:19:46,165] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-11-04 00:19:46,166] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-11-04 00:19:46,167] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-11-04 00:19:46,171] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-11-04 00:19:46,171] [INFO] [Engine]: Engine initialized. -[2024-11-04 00:19:46,172] [INFO] [Engine]: Engine starting simulation. -[2024-11-04 00:19:46,173] [INFO] [Engine]: Starting iteration 1 -[2024-11-04 00:19:46,174] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-11-04 00:19:47,175] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-11-04 00:19:47,176] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-11-04 00:19:47,177] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-11-04 00:19:47,178] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-11-04 00:20:00,668] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-04 00:20:00,669] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-11-04 00:20:06,834] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: The task is: Search for the latest trends in AI using Google. -[2024-11-04 00:20:06,835] [INFO] [BaseAgent]: Agent agent2 received message from agent1: The task is: Search for the latest trends in AI using Google. -[2024-11-04 00:20:11,574] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I will search for the latest trends in AI using Google and provide the information. 
-[2024-11-04 00:20:11,575] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I will search for the latest trends in AI using Google and provide the information. -[2024-11-04 00:20:15,533] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: The task is: Search for the latest trends in AI using Google. -[2024-11-04 00:20:15,534] [INFO] [BaseAgent]: Agent agent2 received message from agent1: The task is: Search for the latest trends in AI using Google. -[2024-11-04 00:20:19,308] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: I will search for the latest trends in AI using Google and provide the information. -[2024-11-04 00:20:19,309] [INFO] [BaseAgent]: Agent agent1 received message from agent2: I will search for the latest trends in AI using Google and provide the information. -[2024-11-04 00:20:23,408] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: The task is: Search for the latest trends in AI using Google. -[2024-11-04 00:20:23,408] [INFO] [BaseAgent]: Agent agent2 received message from agent1: The task is: Search for the latest trends in AI using Google. -[2024-11-04 00:20:24,271] [INFO] [BaseAgent]: Agent 'agent2' called 'new_communication_session' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google and provide the information?'}'. -[2024-11-04 00:20:24,272] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully completed session 395c2b9d-a76b-421d-9a86-6d357b39f1f4', 'session_id': 'The chat history shows that agent2 was asked by agent1 to search for the latest trends in AI using Google. Agent2 acknowledged the task and agreed to provide the information. The task was reiterated multiple times in the conversation, emphasizing the importance of searching for the latest trends in AI using Google.'}'. 
-[2024-11-04 00:20:24,273] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_S9OIJInqmxf0OmR9tOmP2IJE', type='function')], function_call=None) -[2024-11-04 00:20:24,325] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-11-04 00:20:24,326] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-11-04 00:20:27,050] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-11-04 00:20:27,106] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_awzSGbJJFLHmEgR2TWUjiQ9m', type='function')], function_call=None) -[2024-11-04 00:20:27,112] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. 
-[2024-11-04 00:20:58,353] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you please search for the latest trends in AI using Twitter?"}', name='new_communication_session'), id='call_x5ZhKzkRDpe2poNGsmOfRuXr', type='function')], function_call=None) -[2024-11-04 00:20:58,354] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_S9OIJInqmxf0OmR9tOmP2IJE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_awzSGbJJFLHmEgR2TWUjiQ9m', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you please search for the latest trends in AI using Twitter?"}', name='new_communication_session'), id='call_x5ZhKzkRDpe2poNGsmOfRuXr', type='function')], function_call=None) - -[2024-11-04 00:20:58,354] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_S9OIJInqmxf0OmR9tOmP2IJE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_awzSGbJJFLHmEgR2TWUjiQ9m', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you please search for the latest trends in AI using Twitter?"}', name='new_communication_session'), id='call_x5ZhKzkRDpe2poNGsmOfRuXr', type='function')], function_call=None) - -[2024-11-04 00:20:58,354] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='new_communication_session'), id='call_S9OIJInqmxf0OmR9tOmP2IJE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_awzSGbJJFLHmEgR2TWUjiQ9m', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you please search for the latest trends in AI using Twitter?"}', name='new_communication_session'), id='call_x5ZhKzkRDpe2poNGsmOfRuXr', type='function')], function_call=None) - -[2024-11-04 00:20:59,040] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-11-04 00:20:59,041] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-11-04 00:20:59,041] [INFO] [Engine]: Engine simulation loop completed. 
-[2024-11-04 00:20:59,041] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-11-04 00:20:59,041] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-11-04 00:20:59,041] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-11-04 00:20:59,041] [INFO] [Engine]: Simulation completed. diff --git a/marble/logs/app.log.4 b/marble/logs/app.log.4 deleted file mode 100644 index aa056ba1..00000000 --- a/marble/logs/app.log.4 +++ /dev/null @@ -1,1042 +0,0 @@ -[2024-10-27 22:40:14,546] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
            \n\t
            \n\t\t
            \n\t\t\t
            \n\n\t\t\n\t\t\t\n\n\n\t\t
            \n\t\t
            \n\t\t\t\n\n\n\t\t\t\n\n\t\t
            \n\t\n\n
            \n\t
            \n\t\t
            \n\t\t\t
            \n\t\t
            \n\t\t
            \n\t\t\t
            \n\t\t
            \n\t\t\t\n\t\t
            \n\t
            \n\t
            \n\t\t\t\t
            \n\t\t\n\t\t\t
            \n\t\t
            \n\t\t
            \n\t\t\t
            \n\t\t\t\t
            \n\t\t\t\t\t\n\t\t\t\t\t

            Artificial intelligence

            \n\t\t\t\t\t\t\t\n
            \n\t\n\t\n\t
            \n\n\t\t
            \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
            \n\n\t
            \n
            \n
            \n\t\t\t\t
            \n\t\t\t\t\t
            \n\t\t\t\t\t\t
            \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
            \n\t\t\t\t\t\t
            \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
            \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
            \n\t\t\t\t\t
            \n\t\t\t\t
            \n\t\t\t\t
            \n\t\t\t\t\t
            \n\t\t\t\t\t\t\n\t\t\t\t\t\t
            \n\t\t\n\t\t\t\t\t
            \n\t\t\t\t
            \n\t\t\t\t
            \n\t\t\t\t\t
            \n\t\t\t\t\t\t\t
            \n\t\t
            Page semi-protected
            \n\t\t
            \n\n\t\t\t\t\t\t
            From Wikipedia, the free encyclopedia
            \n\t\t\t\t\t
            \n\t\t\t\t\t
            \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
            \n\n

            \n

            \n\n\n\n\n\n\n\n

            Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

            Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

            The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

            Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

            \n\n

            Goals

            \n

            The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

            \n

            Reasoning and problem-solving

            \n

            Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

            Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

            \n

            Knowledge representation

            \n
            An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
            \n

            Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

            A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

            Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

            \n

            Planning and decision-making

            \n

            An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

            In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

            In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

            A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

            Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

            \n

            Learning

            \n

            Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

            There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

            In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

            Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

            \n
            \n

            Natural language processing

            \n

            Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

            Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

            Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

            \n

            Perception

            \n

            Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

            The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61] object tracking,[62] and robotic perception.[63]\n

            \n

            Social intelligence

            \n
            Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
            \n

            Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

            However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

            \n

            General intelligence

            \n

            A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

            \n

            Techniques

            \n

            AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

            \n

            Search and optimization

            \n

            AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

            \n
            \n

            State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

            Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

            Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

            \n
            \n
            Illustration of gradient descent for 3 different starting points; two parameters (represented by the plane coordinates) are adjusted in order to minimize the loss function (the height)

            Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

            Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

            Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

            Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

            \n

            Logic

            \n

            Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

            Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

            Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

            Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

            Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

            Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

            \n

            Probabilistic methods for uncertain reasoning

            \n
            A simple Bayesian network, with the associated conditional probability tables
            \n

            Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

            Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

            Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

            \n
            Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
            \n

            Classifiers and statistical learning methods

            \n

            The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

            There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

            \n

            Artificial neural networks

            \n
            A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
            \n

            An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

            Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

            In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

            \n
            \n

            Deep learning

            \n
            \n

            Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

            Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

            \n

            GPT

            \n

            Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

            Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

            \n

            Hardware and software

            \n\n

            In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

            The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

            \n

            Applications

            \n

            AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

            Health and medicine

            \n\n

            The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

            For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

            \n

            Games

            \n\n

            Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

            \n

            Mathematics

            \n

            In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

            Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

            When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

            Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

            \n

            Finance

            \n

            Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

            World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

            \n

            Military

            \n\n

            Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

            In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

            \n

            Generative AI

            \n\n
            Vincent van Gogh in watercolour created by generative AI software
            \n

            In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

            In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

            \n

            Agents

            \n

            Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

            \n

            Other industry-specific tasks

            \n

            There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

            AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

            In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

            Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

            \n

            Ethics

            \n\n

            AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

            \n

            Risks and harm

            \n
            \n\n

            Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

            AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

            Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

            AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

            Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

            \n

            Dominance by tech giants

            \n

            The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

            \n

            Substantial power needs and other environmental impacts

            \n\n

            In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

            Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

            A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

            In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

            In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

            \n

            Misinformation

            \n\n

            YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

            In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

            \n

            Algorithmic bias and fairness

            \n\n

            Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

            On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

            COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

            A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

            Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

            Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

            There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

            At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

            \n

            Lack of transparency

            \n\n

            Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[221]\n

            It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

            People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

            DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

            Several approaches aim to address the transparency problem. SHAP enables visualisation of the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

            \n

            Bad actors and weaponized AI

            \n\n

            Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

            A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, they are unlikely to be able to reliably choose targets and could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

            AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

            There are many other ways in which AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

            \n

            Technological unemployment

            \n\n

            Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

            In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

            Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

            From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

            \n

            Existential risk

            \n\n

            It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

            First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

            Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

            The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

            In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

            In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

            Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

            \n

            Ethical machines and alignment

            \n\n

            Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

            Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

            Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

            \n

            Open source

            \n

            Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

            \n

            Frameworks

            \n

            Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework, such as the Care and Act Framework—containing the SUM values and developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

            \n
            • Respect the dignity of individual people
            • \n
            • Connect with other people sincerely, openly, and inclusively
            • \n
            • Care for the wellbeing of everyone
            • \n
            • Protect social values, justice, and the public interest
            \n

            Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[284]\n

            Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

            The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

            \n

            Regulation

            \n\n
            AI Safety Summit
            The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
            \n

            The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

            In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

            In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

            \n

            History

            \n\n\n

            The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCulloch and Pitts\'s design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

            The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

            Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

            In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

            Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

            AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

            Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

            In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

            In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

            \n

            Philosophy

            \n

            Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

            Defining artificial intelligence

            \n\n

            Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

            \n
            The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
            \n

            Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

            McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

            Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

            Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

            \n

            Evaluating approaches to AI

            \n

            No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

            \n

            Symbolic AI and its limits

            \n

            Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

            However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

            The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

            \n

            Neat vs. scruffy

            \n\n

            "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

            \n

            Soft vs. hard computing

            \n\n

            Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

            \n

            Narrow vs. general AI

            \n\n

            AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

            \n

            Machine consciousness, sentience, and mind

            \n\n

            The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

            \n

            Consciousness

            \n\n

            David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

            \n

            Computationalism and functionalism

            \n\n

            Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

            Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

            \n

            AI welfare and rights

            \n

            It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

            In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[375][376]\n

            Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

            \n

            Future

            \n

            Superintelligence and the singularity

            \n

            A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

            However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

            \n

            Transhumanism

            \n\n

            Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

            Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

            \n

            In fiction

            \n\n
            The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
            \n

            Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

            A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

            Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

            Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

            \n

            See also

            \n\n

            Explanatory notes

            \n
            \n
              \n
            1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
            2. \n
            3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
            4. \n
            5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
            6. \n
            7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
            8. \n
            9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
            10. \n
            11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
            12. \n
            13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
            14. \n
            15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
            16. \n
            17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
            18. \n
            19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
            20. \n
            21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
            22. \n
            23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
            24. \n
            25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[218]\n
            26. \n
            27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
            28. \n
            29. ^ This is the United Nations\' definition, and includes things like land mines as well.[232]\n
            30. \n
            31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
            32. \n
            33. ^ Sometimes called a "robopocalypse"[251]\n
            34. \n
            35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
            36. \n
            37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
            38. \n
            39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
            40. \n
            41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
            42. \n
            43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
            44. \n
            45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
            46. \n
            47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
            48. \n
            49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
            50. \n
            51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
            52. \n
            53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
            54. \n
            55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
            56. \n
            57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
            58. \n
            \n

            References

            \n
            \n
              \n
            1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
            2. \n
            3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
            4. \n
            5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
            6. \n
            7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
              Proposal for the modern version: Pennachin & Goertzel (2007)
              Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
              \n
            8. \n
            9. ^ Russell & Norvig (2021, §1.2).\n
            10. \n
            11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
              The proposal: McCarthy et al. (1955)
              \n
            12. \n
            13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
            14. \n
            15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
            16. \n
            17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
            18. \n
            19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
            20. \n
            21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
            22. \n
            23. ^ Toews (2023).\n
            24. \n
            25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
            26. \n
            27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
            28. \n
            29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
            30. \n
            31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
            32. \n
            33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
            34. \n
            35. ^ Smoliar & Zhang (1994).\n
            36. \n
            37. ^ Neumann & Möller (2008).\n
            38. \n
            39. ^ Kuperman, Reichley & Bailey (2006).\n
            40. \n
            41. ^ McGarry (2005).\n
            42. \n
            43. ^ Bertini, Del Bimbo & Torniai (2006).\n
            44. \n
            45. ^ Russell & Norvig (2021), pp. 272.\n
            46. \n
            47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
            48. \n
            49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
            50. \n
            51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
            52. \n
            53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
            54. \n
            55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
            56. \n
            57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
            58. \n
            59. ^ Newquist (1994), p. 296.\n
            60. \n
            61. ^ Crevier (1993), pp. 204–208.\n
            62. \n
            63. ^ Russell & Norvig (2021), p. 528.\n
            64. \n
            65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
            66. \n
            67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
            68. \n
            69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
            70. \n
            71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
            72. \n
            73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
            74. \n
            75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
            76. \n
            77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
            78. \n
            79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
            80. \n
            81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
            82. \n
            83. ^ Turing (1950).\n
            84. \n
            85. ^ Solomonoff (1956).\n
            86. \n
            87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
            88. \n
            89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
            90. \n
            91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
            92. \n
            93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
            94. \n
            95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
            96. \n
            97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
            98. \n
            99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
            100. \n
            101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
            102. \n
            103. ^ Russell & Norvig (2021), pp. 856–858.\n
            104. \n
            105. ^ Dickson (2022).\n
            106. \n
            107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
            108. \n
            109. ^ Vincent (2019).\n
            110. \n
            111. ^ Russell & Norvig (2021), pp. 875–878.\n
            112. \n
            113. ^ Bushwick (2023).\n
            114. \n
            115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
            116. \n
            117. ^ Russell & Norvig (2021), pp. 849–850.\n
            118. \n
            119. ^ Russell & Norvig (2021), pp. 895–899.\n
            120. \n
            121. ^ Russell & Norvig (2021), pp. 899–901.\n
            122. \n
            123. ^ Challa et al. (2011).\n
            124. \n
            125. ^ Russell & Norvig (2021), pp. 931–938.\n
            126. \n
            127. ^ MIT AIL (2014).\n
            128. \n
            129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
            130. \n
            131. ^ Waddell (2018).\n
            132. \n
            133. ^ Poria et al. (2017).\n
            134. \n
            135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
            136. \n
            137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
            138. \n
            139. ^ Russell & Norvig (2021), sect. 11.2.\n
            140. \n
            141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
            142. \n
            143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
            144. \n
            145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
            146. \n
            147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
            148. \n
            149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
            150. \n
            151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
            152. \n
            153. ^ Merkle & Middendorf (2013).\n
            154. \n
            155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
            156. \n
            157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
            158. \n
            159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
            160. \n
            161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
            162. \n
            163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
            164. \n
            165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
            166. \n
            167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
            168. \n
            169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
            170. \n
            171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
            172. \n
            173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
            174. \n
            175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
            176. \n
            177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
            178. \n
            179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
            180. \n
            181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
            182. \n
            183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
            184. \n
            185. ^ Domingos (2015), chpt. 6.\n
            186. \n
            187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
            188. \n
            189. ^ Domingos (2015), p. 210.\n
            190. \n
            191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
            192. \n
            193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
            194. \n
            195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
            196. \n
            197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
            198. \n
            199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
            200. \n
            201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
            202. \n
            203. ^ Domingos (2015), p. 152.\n
            204. \n
            205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
            206. \n
            207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
            208. \n
            209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
            210. \n
            211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
            212. \n
            213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
            214. \n
            215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
            216. \n
            217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
            218. \n
            219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
            220. \n
            221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
            222. \n
            223. ^ Deng & Yu (2014), pp. 199–200.\n
            224. \n
            225. ^ Ciresan, Meier & Schmidhuber (2012).\n
            226. \n
            227. ^ Russell & Norvig (2021), p. 751.\n
            228. \n
            229. ^ a b c Russell & Norvig (2021), p. 17.\n
            230. \n
            231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
            232. \n
            233. ^ a b Schmidhuber (2022), sect. 5.\n
            234. \n
            235. ^ Schmidhuber (2022), sect. 6.\n
            236. \n
            237. ^ a b c Schmidhuber (2022), sect. 7.\n
            238. \n
            239. ^ Schmidhuber (2022), sect. 8.\n
            240. \n
            241. ^ Quoted in Christian (2020, p. 22)\n
            242. \n
            243. ^ Smith (2023).\n
            244. \n
            245. ^ "Explained: Generative AI". 9 November 2023.\n
            246. \n
            247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
            248. \n
            249. ^ Marmouyet (2023).\n
            250. \n
            251. ^ Kobielus (2019).\n
            252. \n
            253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
            254. \n
            255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
            256. \n
            257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
            258. \n
            259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
            260. \n
            261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
            262. \n
            263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
            264. \n
            265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
            266. \n
            267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
            268. \n
            269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
            270. \n
            271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            272. \n
            273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
            274. \n
            275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
            276. \n
            277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
            278. \n
            279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
            280. \n
            281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
            282. \n
            283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
            284. \n
            285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
            286. \n
            287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
            288. \n
            289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
            290. \n
            291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
            292. \n
            293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
            294. \n
            295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
            296. \n
            297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
            298. \n
            299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
            300. \n
            301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
            302. \n
            303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
            304. \n
            305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
            306. \n
            307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
            308. \n
            309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024. PD-notice\n
            310. \n
            311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
            312. \n
            313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
            314. \n
            315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
            316. \n
            317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
            318. \n
            319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
            320. \n
            321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
            322. \n
            323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
            324. \n
            325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
            326. \n
            327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
            328. \n
            329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            330. \n
            331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
            332. \n
            333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
            334. \n
            335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
            336. \n
            337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
            338. \n
            339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            340. \n
            341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
            342. \n
            343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            344. \n
            345. ^ Simonite (2016).\n
            346. \n
            347. ^ Russell & Norvig (2021), p. 987.\n
            348. \n
            349. ^ Laskowski (2023).\n
            350. \n
            351. ^ GAO (2022).\n
            352. \n
            353. ^ Valinsky (2019).\n
            354. \n
            355. ^ Russell & Norvig (2021), p. 991.\n
            356. \n
            357. ^ Russell & Norvig (2021), pp. 991–992.\n
            358. \n
            359. ^ Christian (2020), p. 63.\n
            360. \n
            361. ^ Vincent (2022).\n
            362. \n
            363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
            364. \n
            365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
            366. \n
            367. ^ Reisner (2023).\n
            368. \n
            369. ^ Alter & Harris (2023).\n
            370. \n
            371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
            372. \n
            373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
            374. \n
            375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
            376. \n
            377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
            378. \n
            379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
            380. \n
            381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
            382. \n
            383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
            384. \n
            385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
            386. \n
            387. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
            388. \n
            389. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
            390. \n
            391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
            392. \n
            393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            394. \n
            395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
            396. \n
            397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            398. \n
            399. ^ Nicas (2018).\n
            400. \n
            401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
            402. \n
            403. ^ Williams (2023).\n
            404. \n
            405. ^ Taylor & Hern (2023).\n
            406. \n
            407. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
            408. \n
            409. ^ a b Rose (2023).\n
            410. \n
            411. ^ CNA (2019).\n
            412. \n
            413. ^ Goffrey (2008), p. 17.\n
            414. \n
            415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
            416. \n
            417. ^ Christian (2020), p. 25.\n
            418. \n
            419. ^ a b Russell & Norvig (2021), p. 995.\n
            420. \n
            421. ^ Grant & Hill (2023).\n
            422. \n
            423. ^ Larson & Angwin (2016).\n
            424. \n
            425. ^ Christian (2020), p. 67–70.\n
            426. \n
            427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
            428. \n
            429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
            430. \n
            431. ^ Quoted in Christian (2020, p. 65).\n
            432. \n
            433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
            434. \n
            435. ^ Quoted in Christian (2020, p. 80)\n
            436. \n
            437. ^ Dockrill (2022).\n
            438. \n
            439. ^ Sample (2017).\n
            440. \n
            441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
            442. \n
            443. ^ Christian (2020), p. 110.\n
            444. \n
            445. ^ Christian (2020), pp. 88–91.\n
            446. \n
            447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
            448. \n
            449. ^ Christian (2020), p. 91.\n
            450. \n
            451. ^ Christian (2020), p. 83.\n
            452. \n
            453. ^ Verma (2021).\n
            454. \n
            455. ^ Rothman (2020).\n
            456. \n
            457. ^ Christian (2020), pp. 105–108.\n
            458. \n
            459. ^ Christian (2020), pp. 108–112.\n
            460. \n
            461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
            462. \n
            463. ^ Russell & Norvig (2021), p. 989.\n
            464. \n
            465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
            466. \n
            467. ^ Russell & Norvig (2021), p. 988.\n
            468. \n
            469. ^ Robitzski (2018); Sainato (2015)\n
            470. \n
            471. ^ Harari (2018).\n
            472. \n
            473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
            474. \n
            475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
            476. \n
            477. ^ Urbina et al. (2022).\n
            478. \n
            479. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
            480. \n
            481. ^ Ford & Colvin (2015); McGaughey (2022)\n
            482. \n
            483. ^ IGM Chicago (2017).\n
            484. \n
            485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
            486. \n
            487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
            488. \n
            489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
            490. \n
            491. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
            492. \n
            493. ^ Morgenstern (2015).\n
            494. \n
            495. ^ Mahdawi (2017); Thompson (2014)\n
            496. \n
            497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
            498. \n
            499. ^ Cellan-Jones (2014).\n
            500. \n
            501. ^ Russell & Norvig (2021), p. 1001.\n
            502. \n
            503. ^ Bostrom (2014).\n
            504. \n
            505. ^ Russell (2019).\n
            506. \n
            507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
            508. \n
            509. ^ Harari (2023).\n
            510. \n
            511. ^ Müller & Bostrom (2014).\n
            512. \n
            513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
            514. \n
            515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
            516. \n
            517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
            518. \n
            519. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
            520. \n
            521. ^ Valance (2023).\n
            522. \n
            523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
            524. \n
            525. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
            526. \n
            527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
            528. \n
            529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
            530. \n
            531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
            532. \n
            533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
            534. \n
            535. ^ a b Christian (2020), pp. 67, 73.\n
            536. \n
            537. ^ Yudkowsky (2008).\n
            538. \n
            539. ^ a b Anderson & Anderson (2011).\n
            540. \n
            541. ^ AAAI (2014).\n
            542. \n
            543. ^ Wallach (2010).\n
            544. \n
            545. ^ Russell (2019), p. 173.\n
            546. \n
            547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
            548. \n
            549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
            550. \n
            551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
            552. \n
            553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
            554. \n
            555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
            556. \n
            557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
            558. \n
            559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
            560. \n
            561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
            562. \n
            563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
            564. \n
            565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
            566. \n
            567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            568. \n
            569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            570. \n
            571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
            572. \n
            573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
            574. \n\n
            575. ^ a b Vincent (2023).\n
            576. \n
            577. ^ Stanford University (2023).\n
            578. \n
            579. ^ a b c d UNESCO (2021).\n
            580. \n
            581. ^ Kissinger (2021).\n
            582. \n
            583. ^ Altman, Brockman & Sutskever (2023).\n
            584. \n
            585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
            586. \n
            587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
            588. \n
            589. ^ Edwards (2023).\n
            590. \n
            591. ^ Kasperowicz (2023).\n
            592. \n
            593. ^ Fox News (2023).\n
            594. \n
            595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
            596. \n
            597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
            598. \n
            599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
            600. \n
            601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
            602. \n
            603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
            604. \n
            605. ^ a b Russell & Norvig 2021, p. 9.\n
            606. \n
            607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
            608. \n
            609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
            610. \n
            611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
            612. \n
            613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
            614. \n
            615. ^ Crevier (1993), pp. 47–49.\n
            616. \n
            617. ^ Russell & Norvig (2003), p. 17.\n
            618. \n
            619. ^ Russell & Norvig (2003), p. 18.\n
            620. \n
            621. ^ Newquist (1994), pp. 86–86.\n
            622. \n
            623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
            624. \n
            625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
            626. \n
            627. ^ Russell & Norvig (2021), p. 21.\n
            628. \n
            629. ^ Lighthill (1973).\n
            630. \n
            631. ^ NRC 1999, pp. 212–213.\n
            632. \n
            633. ^ Russell & Norvig (2021), p. 22.\n
            634. \n
            635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
            636. \n
            637. ^ Russell & Norvig (2021), p. 24.\n
            638. \n
            639. ^ Nilsson (1998), p. 7.\n
            640. \n
            641. ^ McCorduck (2004), pp. 454–462.\n
            642. \n
            643. ^ Moravec (1988).\n
            644. \n
            645. ^ a b Brooks (1990).\n
            646. \n
            647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
            648. \n
            649. ^ Russell & Norvig (2021), p. 25.\n
            650. \n
            651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
            652. \n
            653. ^ Russell & Norvig (2021), p. 26.\n
            654. \n
            655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
            656. \n
            657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
            658. \n
            659. ^ Wong (2023).\n
            660. \n
            661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
            662. \n
            663. ^ a b c Clark (2015b).\n
            664. \n
            665. ^ Big data: Russell & Norvig (2021, p. 26)\n
            666. \n
            667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
            668. \n
            669. ^ DiFeliciantonio (2023).\n
            670. \n
            671. ^ Goswami (2023).\n
            672. \n
            673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
            674. \n
            675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
            676. \n
            677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
            678. \n
            679. ^ a b Turing (1950), p. 1.\n
            680. \n
            681. ^ Turing (1950), Under "The Argument from Consciousness".\n
            682. \n
            683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
            684. \n
            685. ^ Russell & Norvig (2021), p. 3.\n
            686. \n
            687. ^ Maker (2006).\n
            688. \n
            689. ^ McCarthy (1999).\n
            690. \n
            691. ^ Minsky (1986).\n
            692. \n
            693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
            694. \n
            695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
            696. \n
            697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
            698. \n
            699. ^ Nilsson (1983), p. 10.\n
            700. \n
            701. ^ Haugeland (1985), pp. 112–117.\n
            702. \n
            703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
            704. \n
            705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
            706. \n
            707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
            708. \n
            709. ^ Crevier (1993), p. 125.\n
            710. \n
            711. ^ Langley (2011).\n
            712. \n
            713. ^ Katz (2012).\n
            714. \n
            715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
            716. \n
            717. ^ Pennachin & Goertzel (2007).\n
            718. \n
            719. ^ a b Roberts (2016).\n
            720. \n
            721. ^ Russell & Norvig (2021), p. 986.\n
            722. \n
            723. ^ Chalmers (1995).\n
            724. \n
            725. ^ Dennett (1991).\n
            726. \n
            727. ^ Horst (2005).\n
            728. \n
            729. ^ Searle (1999).\n
            730. \n
            731. ^ Searle (1980), p. 1.\n
            732. \n
            733. ^ Russell & Norvig (2021), p. 9817.\n
            734. \n
            735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
            736. \n
            737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
            738. \n
            739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
            740. \n
            741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
            742. \n
            743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
            744. \n
            745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
            746. \n
            747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
            748. \n
            749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
            750. \n
            751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
            752. \n
            753. ^ Russell & Norvig (2021), p. 1005.\n
            754. \n
            755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
            756. \n
            757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
            758. \n
            759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
            760. \n
            761. ^ McCorduck (2004), pp. 340–400.\n
            762. \n
            763. ^ Buttazzo (2001).\n
            764. \n
            765. ^ Anderson (2008).\n
            766. \n
            767. ^ McCauley (2007).\n
            768. \n
            769. ^ Galvan (1997).\n
            770. \n
            \n

            AI textbooks

            \n

            The two most widely used textbooks in 2023 (see the Open Syllabus):\n

            \n\n

            These were the four of the most widely used AI textbooks in 2008:\n

            \n
            \n\n
            \n

            Other textbooks:\n

            \n\n

            History of AI

            \n
            \n\n
            \n

            Other sources

            \n
            \n\n\n
            \n

            Further reading

            \n
            \n\n
            \n
            \n\n\n\n\n\n\n\n\n\n
            \n
            \n\t\t\t\t\t\n\t\t\t\t
            \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
            \n\t
              \n
              \n\n\n'}'. -[2024-10-27 22:40:17,603] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-10-27 22:40:20,861] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=latest%20trends%20in%20AI'}'. -[2024-10-27 22:40:20,862] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=latest%20trends%20in%20AI', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. -[2024-10-27 22:42:09,252] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:42:09,252] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:42:09,252] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:42:09,253] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:42:09,253] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:42:09,253] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:42:09,253] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:42:09,253] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:42:09,253] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:42:09,254] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-27 22:42:09,254] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:42:09,254] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:42:09,254] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:42:09,255] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:42:09,255] [INFO] [EnginePlanner]: EnginePlanner initialized. 
-[2024-10-27 22:42:09,255] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:42:09,256] [INFO] [Engine]: Engine starting simulation. -[2024-10-27 22:42:09,256] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:42:09,256] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-27 22:42:10,427] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'}, 'continue': True} -[2024-10-27 22:42:10,428] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'} -[2024-10-27 22:42:24,102] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-10-27 22:42:30,703] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-10-27 22:42:31,225] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-10-27 22:42:31,226] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: - -[2024-10-27 22:42:31,226] [INFO] [Engine]: Agents' Results Summary: - -[2024-10-27 22:42:31,226] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: - -[2024-10-27 22:42:31,226] [ERROR] [Engine]: An error occurred during simulation. 
-Traceback (most recent call last): - File "/home/ononoki/Desktop/bench_project/MARBLE/marble/engine/engine.py", line 148, in start - File "/home/ononoki/Desktop/bench_project/MARBLE/marble/evaluator/evaluator.py", line 39, in update - if environment.is_task_completed(): - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/ononoki/Desktop/bench_project/MARBLE/marble/environments/base_env.py", line 38, in is_task_completed - return self._compare_to_ground_truth(last_action_result, self.ground_truth) - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/home/ononoki/Desktop/bench_project/MARBLE/marble/environments/base_env.py", line 43, in _compare_to_ground_truth - result_str: str = result.get("result", "") - ^^^^^^^^^^ -AttributeError: 'str' object has no attribute 'get' -[2024-10-27 22:42:31,227] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:42:31,228] [INFO] [Evaluator]: Total Token Consumption: 0 -[2024-10-27 22:42:31,228] [INFO] [Evaluator]: Average Tokens per Iteration: 0 -[2024-10-27 22:42:31,228] [INFO] [Engine]: Simulation completed. -[2024-10-27 22:42:36,242] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:42:36,243] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:42:36,243] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:42:36,243] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:42:36,243] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:42:36,243] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:42:36,243] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:42:36,244] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:42:36,244] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:42:36,244] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-10-27 22:42:36,244] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:42:36,244] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:42:36,244] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:42:36,244] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:42:36,245] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-27 22:42:36,245] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:42:36,245] [INFO] [Engine]: Engine starting simulation. -[2024-10-27 22:42:36,245] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:42:36,245] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:42:37,301] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'}, 'continue': True} -[2024-10-27 22:42:37,302] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using google.', 'agent3': 'Search for the latest trends in AI using wikipedia.', 'agent4': 'Search for the latest trends in AI using twitter.'} -[2024-10-27 22:42:37,302] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using google. -[2024-10-27 22:42:37,302] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using google.'. -[2024-10-27 22:43:17,137] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-10-27 22:43:17,144] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

              Accessibility Links

              About 518,000,000 results (0.30 seconds) 

              Ads

              Sponsored
              The AI market is quickly increasing due to the rapid pace of technological change. See how we are helping executives understand how potential disruptors will impact growth.
              Sponsored
              The Future of AI
              Appian
              https://www.appian.com › ai › outlook
              Appian
              https://www.appian.com › ai › outlook
              2024 AI Outlook — View the report for AI use cases, AI predictions, tips for overcoming the risks, and more. AI is making software a collaborator in the workplace. Learn more in the 2024 AI Outlook.
              Sponsored
              AI Trends 2024
              NVIDIA
              https://www.nvidia.com
              NVIDIA
              https://www.nvidia.com
              Trends in AI — Elevate AI decision-making and innovation in your organization. Discover AI trends and strategies for business success. Capitalize on AI. Empower Transformation. Mitigate Risk.
              Sponsored
              Companies not only applied AI in Q3—they also started measuring value. See what they did.

              Ads

              Sponsored
              From brainstorming to proofs of concept, KPMG help clients crystallize their AI strategy. KPMG automates routine tasks, freeing resources for high-value strategic initiatives.
              Sponsored
              Trends in Data + AI Adoption — How are leaders succeeding with GenAI? How does your ML strategy compare? Get answers. Your playbook to all things data + AI: the strategy, the platform, the data stack.
              Sponsored
              Artificial intelligence and related technologies are advancing at a rapid pace. Explore our work on AI use, policy, ethics, and development. Browse Publications. View Events.
              Google apps
              '}'. -[2024-10-27 22:43:17,215] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_4GStBQ9QEoqAspnXVPoFYCWM', type='function')], function_call=None) -[2024-10-27 22:43:17,216] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using wikipedia. -[2024-10-27 22:43:17,216] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using wikipedia.'. -[2024-10-27 22:43:33,622] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:43:33,631] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
              \n\t
              \n\t\t
              \n\t\t\t
              \n\n\t\t\n\t\t\t\n\n\n\t\t
              \n\t\t
              \n\t\t\t\n\n\n\t\t\t\n\n\t\t
              \n\t\n\n
              \n\t
              \n\t\t
              \n\t\t\t
              \n\t\t
              \n\t\t
              \n\t\t\t
              \n\t\t
              \n\t\t\t\n\t\t
              \n\t
              \n\t
              \n\t\t\t\t
              \n\t\t\n\t\t\t
              \n\t\t
              \n\t\t
              \n\t\t\t
              \n\t\t\t\t
              \n\t\t\t\t\t\n\t\t\t\t\t

              Artificial intelligence

              \n\t\t\t\t\t\t\t\n
              \n\t\n\t\n\t
              \n\n\t\t
              \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
              \n\n\t
              \n
              \n
              \n\t\t\t\t
              \n\t\t\t\t\t
              \n\t\t\t\t\t\t
              \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
              \n\t\t\t\t\t\t
              \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
              \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
              \n\t\t\t\t\t
              \n\t\t\t\t
              \n\t\t\t\t
              \n\t\t\t\t\t
              \n\t\t\t\t\t\t\n\t\t\t\t\t\t
              \n\t\t\n\t\t\t\t\t
              \n\t\t\t\t
              \n\t\t\t\t
              \n\t\t\t\t\t
              \n\t\t\t\t\t\t\t
              \n\t\t
              Page semi-protected
              \n\t\t
              \n\n\t\t\t\t\t\t
              From Wikipedia, the free encyclopedia
              \n\t\t\t\t\t
              \n\t\t\t\t\t
              \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
              \n\n

              \n

              \n\n\n\n\n\n\n\n

              Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

              Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

              The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

              Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

              \n\n

              Goals

              \n

              The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

              \n

              Reasoning and problem-solving

              \n

              Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

              Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

              \n

              Knowledge representation

              \n
              An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
              \n

              Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

              A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

              Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

              \n

              Planning and decision-making

              \n

              An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

              In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

              In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

              A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

              Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

              \n

              Learning

              \n

              Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

              There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

              In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

              Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

              \n
              \n

              Natural language processing

              \n

              Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

              Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

              Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

              \n

              Perception

              \n

              Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

              The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

              \n

              Social intelligence

              \n
              Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
              \n

              Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

              However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

              \n

              General intelligence

              \n

              A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

              \n

              Techniques

              \n

              AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

              \n

              Search and optimization

              \n

              AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

              \n
              \n

              State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

              Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

              Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

              \n
              \n
              Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

              Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

              Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

              Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

              Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

              \n

              Logic

              \n

              Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

              Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

              Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

              Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

              Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

              Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

              \n

              Probabilistic methods for uncertain reasoning

              \n
              A simple Bayesian network, with the associated conditional probability tables
              \n

              Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

              Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

              Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

              \n
              Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
              \n

              Classifiers and statistical learning methods

              \n

              The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

              There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

              \n

              Artificial neural networks

              \n
              A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
              \n

              An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

              Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

              In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

              \n
              \n

              Deep learning

              \n
              \n

              Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

              Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

              \n

              GPT

              \n

              Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

              Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

              \n

              Hardware and software

              \n\n

              In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

              The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

              \n

              Applications

              \n

              AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

              Health and medicine

              \n\n

              The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

              For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

              \n

              Games

              \n\n

              Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

              \n

              Mathematics

              \n

              In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

              Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

              When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

              Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

              \n

              Finance

              \n

              Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

              World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

              \n

              Military

              \n\n

              Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

              In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

              \n

              Generative AI

              \n\n
              Vincent van Gogh in watercolour created by generative AI software
              \n

              In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

              In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

              \n

              Agents

              \n

              Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

              \n

              Other industry-specific tasks

              \n

              There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

              AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

              In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

              Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

              \n

              Ethics

              \n\n

              AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

              \n

              Risks and harm

              \n
              \n\n

              Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

              AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

              Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

              AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

              Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

              \n

              Dominance by tech giants

              \n

              The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

              \n

              Substantial power needs and other environmental impacts

              \n\n

              In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

              Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

              A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

              In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

              In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

              \n

              Misinformation

              \n\n

              YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

              In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

              \n

              Algorithmic bias and fairness

              \n\n

              Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

              On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

              COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

              A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

              Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

              Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

              There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

              At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

              \n

              Lack of transparency

              \n\n

              Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[221]\n

              It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

              People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

              DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

              Several approaches aim to address the transparency problem. SHAP enables to visualise the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

              \n

              Bad actors and weaponized AI

              \n\n

              Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

              A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets, and they could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

              AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

              There are many other ways that AI is expected to help bad actors, some of which can not be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

              \n

              Technological unemployment

              \n\n

              Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

              In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

              Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

              From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

              \n

              Existential risk

              \n\n

              It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

              First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

              Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

              The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

              In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

              In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

              Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

              \n

              Ethical machines and alignment

              \n\n

              Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

              Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

              Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

              \n

              Open source

              \n

              Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

              \n

              Frameworks

              \n

              Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

              \n
              • Respect the dignity of individual people
              • \n
              • Connect with other people sincerely, openly, and inclusively
              • \n
              • Care for the wellbeing of everyone
              • \n
              • Protect social values, justice, and the public interest
              \n

              Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[284]\n

              Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

              The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

              \n

              Regulation

              \n\n
              AI Safety Summit
              The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
              \n

              The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

              In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

              In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

              \n

              History

              \n\n\n

              The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCulloch and Pitts design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

              The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

              Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

              In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

              Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

              AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

              Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

              In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

              In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

              \n

              Philosophy

              \n

              Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

              Defining artificial intelligence

              \n\n

              Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

              \n
              The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
              \n

              Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

              McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

              Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

              Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

              \n

              Evaluating approaches to AI

              \n

              No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

              \n

              Symbolic AI and its limits

              \n

              Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

              However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

              The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

              \n

              Neat vs. scruffy

              \n\n

              "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

              \n

              Soft vs. hard computing

              \n\n

              Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

              \n

              Narrow vs. general AI

              \n\n

              AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

              \n

              Machine consciousness, sentience, and mind

              \n\n

              The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

              \n

              Consciousness

              \n\n

              David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

              \n

              Computationalism and functionalism

              \n\n

              Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

              Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

              \n

              AI welfare and rights

              \n

              It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

              In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[375][376]\n

              Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

              \n

              Future

              \n

              Superintelligence and the singularity

              \n

              A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

              However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

              \n

              Transhumanism

              \n\n

              Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

              Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

              \n

              In fiction

              \n\n
              The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
              \n

              Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

              A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

              Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

              Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

              \n

              See also

              \n\n

              Explanatory notes

              \n
              \n
                \n
              1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
              2. \n
              3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
              4. \n
              5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
              6. \n
              7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
              8. \n
              9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
              10. \n
              11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
              12. \n
              13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
              14. \n
              15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
              16. \n
              17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
              18. \n
              19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
              20. \n
              21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
              22. \n
              23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
              24. \n
              25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[218]\n
              26. \n
              27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
              28. \n
              29. ^ This is the United Nations\' definition, and includes things like land mines as well.[232]\n
              30. \n
              31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
              32. \n
              33. ^ Sometimes called a "robopocalypse"[251]\n
              34. \n
              35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
              36. \n
              37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
              38. \n
              39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
              40. \n
              41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
              42. \n
              43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
              44. \n
              45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
              46. \n
              47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
              48. \n
              49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
              50. \n
              51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
              52. \n
              53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
              54. \n
              55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
              56. \n
              57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
              58. \n
              \n

              References

              \n
              \n
                \n
              1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
              2. \n
              3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
              4. \n
              5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
              6. \n
              7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
                Proposal for the modern version: Pennachin & Goertzel (2007)
                Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
                \n
              8. \n
              9. ^ Russell & Norvig (2021, §1.2).\n
              10. \n
              11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
                The proposal: McCarthy et al. (1955)
                \n
              12. \n
              13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
              14. \n
              15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
              16. \n
              17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
              18. \n
              19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
              20. \n
              21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
              22. \n
              23. ^ Toews (2023).\n
              24. \n
              25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
              26. \n
              27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
              28. \n
              29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
              30. \n
              31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
              32. \n
              33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
              34. \n
              35. ^ Smoliar & Zhang (1994).\n
              36. \n
              37. ^ Neumann & Möller (2008).\n
              38. \n
              39. ^ Kuperman, Reichley & Bailey (2006).\n
              40. \n
              41. ^ McGarry (2005).\n
              42. \n
              43. ^ Bertini, Del Bimbo & Torniai (2006).\n
              44. \n
              45. ^ Russell & Norvig (2021), pp. 272.\n
              46. \n
              47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
              48. \n
              49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
              50. \n
              51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
              52. \n
              53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
              54. \n
              55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
              56. \n
              57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
              58. \n
              59. ^ Newquist (1994), p. 296.\n
              60. \n
              61. ^ Crevier (1993), pp. 204–208.\n
              62. \n
              63. ^ Russell & Norvig (2021), p. 528.\n
              64. \n
              65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
              66. \n
              67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
              68. \n
              69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
              70. \n
              71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
              72. \n
              73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
              74. \n
              75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
              76. \n
              77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
              78. \n
              79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
              80. \n
              81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
              82. \n
              83. ^ Turing (1950).\n
              84. \n
              85. ^ Solomonoff (1956).\n
              86. \n
              87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
              88. \n
              89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
              90. \n
              91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
              92. \n
              93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
              94. \n
              95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
              96. \n
              97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
              98. \n
              99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
              100. \n
              101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
              102. \n
              103. ^ Russell & Norvig (2021), pp. 856–858.\n
              104. \n
              105. ^ Dickson (2022).\n
              106. \n
              107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
              108. \n
              109. ^ Vincent (2019).\n
              110. \n
              111. ^ Russell & Norvig (2021), pp. 875–878.\n
              112. \n
              113. ^ Bushwick (2023).\n
              114. \n
              115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
              116. \n
              117. ^ Russell & Norvig (2021), pp. 849–850.\n
              118. \n
              119. ^ Russell & Norvig (2021), pp. 895–899.\n
              120. \n
              121. ^ Russell & Norvig (2021), pp. 899–901.\n
              122. \n
              123. ^ Challa et al. (2011).\n
              124. \n
              125. ^ Russell & Norvig (2021), pp. 931–938.\n
              126. \n
              127. ^ MIT AIL (2014).\n
              128. \n
              129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
              130. \n
              131. ^ Waddell (2018).\n
              132. \n
              133. ^ Poria et al. (2017).\n
              134. \n
              135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
              136. \n
              137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
              138. \n
              139. ^ Russell & Norvig (2021), sect. 11.2.\n
              140. \n
              141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
              142. \n
              143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
              144. \n
              145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
              146. \n
              147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
              148. \n
              149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
              150. \n
              151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
              152. \n
              153. ^ Merkle & Middendorf (2013).\n
              154. \n
              155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
              156. \n
              157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
              158. \n
              159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
              160. \n
              161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
              162. \n
              163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
              164. \n
              165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
              166. \n
              167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
              168. \n
              169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
              170. \n
              171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
              172. \n
              173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
              174. \n
              175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
              176. \n
              177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
              178. \n
              179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
              180. \n
              181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
              182. \n
              183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
              184. \n
              185. ^ Domingos (2015), chpt. 6.\n
              186. \n
              187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
              188. \n
              189. ^ Domingos (2015), p. 210.\n
              190. \n
              191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
              192. \n
              193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
              194. \n
              195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
              196. \n
              197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
              198. \n
              199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
              200. \n
              201. ^ Non-parametric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
              202. \n
              203. ^ Domingos (2015), p. 152.\n
              204. \n
              205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
              206. \n
              207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
              208. \n
              209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
              210. \n
              211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
              212. \n
              213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
              214. \n
              215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
              216. \n
              217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
              218. \n
              219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
              220. \n
              221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
              222. \n
              223. ^ Deng & Yu (2014), pp. 199–200.\n
              224. \n
              225. ^ Ciresan, Meier & Schmidhuber (2012).\n
              226. \n
              227. ^ Russell & Norvig (2021), p. 751.\n
              228. \n
              229. ^ a b c Russell & Norvig (2021), p. 17.\n
              230. \n
              231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
              232. \n
              233. ^ a b Schmidhuber (2022), sect. 5.\n
              234. \n
              235. ^ Schmidhuber (2022), sect. 6.\n
              236. \n
              237. ^ a b c Schmidhuber (2022), sect. 7.\n
              238. \n
              239. ^ Schmidhuber (2022), sect. 8.\n
              240. \n
              241. ^ Quoted in Christian (2020, p. 22)\n
              242. \n
              243. ^ Smith (2023).\n
              244. \n
              245. ^ "Explained: Generative AI". 9 November 2023.\n
              246. \n
              247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
              248. \n
              249. ^ Marmouyet (2023).\n
              250. \n
              251. ^ Kobielus (2019).\n
              252. \n
              253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
              254. \n
              255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
              256. \n
              257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
              258. \n
              259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
              260. \n
              261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
              262. \n
              263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
              264. \n
              265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
              266. \n
              267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
              268. \n
              269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
              270. \n
              271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              272. \n
              273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
              274. \n
              275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
              276. \n
              277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
              278. \n
              279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
              280. \n
              281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
              282. \n
              283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
              284. \n
              285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
              286. \n
              287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
              288. \n
              289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
              290. \n
              291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
              292. \n
              293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
              294. \n
              295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
              296. \n
              297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
              298. \n
              299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
              300. \n
              301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
              302. \n
              303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
              304. \n
              305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
              306. \n
              307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
              308. \n
              309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024. PD-notice\n
              310. \n
              311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
              312. \n
              313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
              314. \n
              315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
              316. \n
              317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
              318. \n
              319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
              320. \n
              321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
              322. \n
              323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
              324. \n
              325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
              326. \n
              327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
              328. \n
              329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              330. \n
              331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
              332. \n
              333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
              334. \n
              335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
              336. \n
              337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
              338. \n
              339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              340. \n
              341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
              342. \n
              343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              344. \n
              345. ^ Simonite (2016).\n
              346. \n
              347. ^ Russell & Norvig (2021), p. 987.\n
              348. \n
              349. ^ Laskowski (2023).\n
              350. \n
              351. ^ GAO (2022).\n
              352. \n
              353. ^ Valinsky (2019).\n
              354. \n
              355. ^ Russell & Norvig (2021), p. 991.\n
              356. \n
              357. ^ Russell & Norvig (2021), pp. 991–992.\n
              358. \n
              359. ^ Christian (2020), p. 63.\n
              360. \n
              361. ^ Vincent (2022).\n
              362. \n
              363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
              364. \n
              365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
              366. \n
              367. ^ Reisner (2023).\n
              368. \n
              369. ^ Alter & Harris (2023).\n
              370. \n
              371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
              372. \n
              373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
              374. \n
              375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
              376. \n
              377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
              378. \n
              379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
              380. \n
              381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
              382. \n
              383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
              384. \n
              385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
              386. \n
              387. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
              388. \n
              389. ^ Davenport, Carly. "AI Data Centers and the Coming YS Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
              390. \n
              391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
              392. \n
              393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              394. \n
              395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
              396. \n
              397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              398. \n
              399. ^ Nicas (2018).\n
              400. \n
              401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
              402. \n
              403. ^ Williams (2023).\n
              404. \n
              405. ^ Taylor & Hern (2023).\n
              406. \n
              407. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
              408. \n
              409. ^ a b Rose (2023).\n
              410. \n
              411. ^ CNA (2019).\n
              412. \n
              413. ^ Goffrey (2008), p. 17.\n
              414. \n
              415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
              416. \n
              417. ^ Christian (2020), p. 25.\n
              418. \n
              419. ^ a b Russell & Norvig (2021), p. 995.\n
              420. \n
              421. ^ Grant & Hill (2023).\n
              422. \n
              423. ^ Larson & Angwin (2016).\n
              424. \n
              425. ^ Christian (2020), p. 67–70.\n
              426. \n
              427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
              428. \n
              429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
              430. \n
              431. ^ Quoted in Christian (2020, p. 65).\n
              432. \n
              433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
              434. \n
              435. ^ Quoted in Christian (2020, p. 80)\n
              436. \n
              437. ^ Dockrill (2022).\n
              438. \n
              439. ^ Sample (2017).\n
              440. \n
              441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
              442. \n
              443. ^ Christian (2020), p. 110.\n
              444. \n
              445. ^ Christian (2020), pp. 88–91.\n
              446. \n
              447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
              448. \n
              449. ^ Christian (2020), p. 91.\n
              450. \n
              451. ^ Christian (2020), p. 83.\n
              452. \n
              453. ^ Verma (2021).\n
              454. \n
              455. ^ Rothman (2020).\n
              456. \n
              457. ^ Christian (2020), pp. 105–108.\n
              458. \n
              459. ^ Christian (2020), pp. 108–112.\n
              460. \n
              461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
              462. \n
              463. ^ Russell & Norvig (2021), p. 989.\n
              464. \n
              465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
              466. \n
              467. ^ Russell & Norvig (2021), p. 988.\n
              468. \n
              469. ^ Robitzski (2018); Sainato (2015)\n
              470. \n
              471. ^ Harari (2018).\n
              472. \n
              473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
              474. \n
              475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
              476. \n
              477. ^ Urbina et al. (2022).\n
              478. \n
              479. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
              480. \n
              481. ^ Ford & Colvin (2015);McGaughey (2022)\n
              482. \n
              483. ^ IGM Chicago (2017).\n
              484. \n
              485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
              486. \n
              487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
              488. \n
              489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
              490. \n
              491. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
              492. \n
              493. ^ Morgenstern (2015).\n
              494. \n
              495. ^ Mahdawi (2017); Thompson (2014)\n
              496. \n
              497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
              498. \n
              499. ^ Cellan-Jones (2014).\n
              500. \n
              501. ^ Russell & Norvig 2021, p. 1001.\n
              502. \n
              503. ^ Bostrom (2014).\n
              504. \n
              505. ^ Russell (2019).\n
              506. \n
              507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
              508. \n
              509. ^ Harari (2023).\n
              510. \n
              511. ^ Müller & Bostrom (2014).\n
              512. \n
              513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
              514. \n
              515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
              516. \n
              517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
              518. \n
              519. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
              520. \n
              521. ^ Valance (2023).\n
              522. \n
              523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
              524. \n
              525. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
              526. \n
              527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
              528. \n
              529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
              530. \n
              531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
              532. \n
              533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
              534. \n
              535. ^ a b Christian (2020), pp. 67, 73.\n
              536. \n
              537. ^ Yudkowsky (2008).\n
              538. \n
              539. ^ a b Anderson & Anderson (2011).\n
              540. \n
              541. ^ AAAI (2014).\n
              542. \n
              543. ^ Wallach (2010).\n
              544. \n
              545. ^ Russell (2019), p. 173.\n
              546. \n
              547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
              548. \n
              549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
              550. \n
              551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
              552. \n
              553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
              554. \n
              555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
              556. \n
              557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
              558. \n
              559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
              560. \n
              561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
              562. \n
              563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
              564. \n
              565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
              566. \n
              567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              568. \n
              569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              570. \n
              571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
              572. \n
              573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
              574. \n\n
              575. ^ a b Vincent (2023).\n
              576. \n
              577. ^ Stanford University (2023).\n
              578. \n
              579. ^ a b c d UNESCO (2021).\n
              580. \n
              581. ^ Kissinger (2021).\n
              582. \n
              583. ^ Altman, Brockman & Sutskever (2023).\n
              584. \n
              585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
              586. \n
              587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
              588. \n
              589. ^ Edwards (2023).\n
              590. \n
              591. ^ Kasperowicz (2023).\n
              592. \n
              593. ^ Fox News (2023).\n
              594. \n
              595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
              596. \n
              597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
              598. \n
              599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
              600. \n
              601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
              602. \n
              603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
              604. \n
              605. ^ a b Russell & Norvig 2021, p. 9.\n
              606. \n
              607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
              608. \n
              609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
              610. \n
              611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
              612. \n
              613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
              614. \n
              615. ^ Crevier (1993), pp. 47–49.\n
              616. \n
              617. ^ Russell & Norvig (2003), p. 17.\n
              618. \n
              619. ^ Russell & Norvig (2003), p. 18.\n
              620. \n
              621. ^ Newquist (1994), pp. 86–86.\n
              622. \n
              623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
              624. \n
              625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
              626. \n
              627. ^ Russell & Norvig (2021), p. 21.\n
              628. \n
              629. ^ Lighthill (1973).\n
              630. \n
              631. ^ NRC 1999, pp. 212–213.\n
              632. \n
              633. ^ Russell & Norvig (2021), p. 22.\n
              634. \n
              635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
              636. \n
              637. ^ Russell & Norvig (2021), p. 24.\n
              638. \n
              639. ^ Nilsson (1998), p. 7.\n
              640. \n
              641. ^ McCorduck (2004), pp. 454–462.\n
              642. \n
              643. ^ Moravec (1988).\n
              644. \n
              645. ^ a b Brooks (1990).\n
              646. \n
              647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
              648. \n
              649. ^ Russell & Norvig (2021), p. 25.\n
              650. \n
              651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
              652. \n
              653. ^ Russell & Norvig (2021), p. 26.\n
              654. \n
              655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
              656. \n
              657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
              658. \n
              659. ^ Wong (2023).\n
              660. \n
              661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
              662. \n
              663. ^ a b c Clark (2015b).\n
              664. \n
              665. ^ Big data: Russell & Norvig (2021, p. 26)\n
              666. \n
              667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
              668. \n
              669. ^ DiFeliciantonio (2023).\n
              670. \n
              671. ^ Goswami (2023).\n
              672. \n
              673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
              674. \n
              675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
              676. \n
              677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
              678. \n
              679. ^ a b Turing (1950), p. 1.\n
              680. \n
              681. ^ Turing (1950), Under "The Argument from Consciousness".\n
              682. \n
              683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
              684. \n
              685. ^ Russell & Norvig (2021), p. 3.\n
              686. \n
              687. ^ Maker (2006).\n
              688. \n
              689. ^ McCarthy (1999).\n
              690. \n
              691. ^ Minsky (1986).\n
              692. \n
              693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
              694. \n
              695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
              696. \n
              697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
              698. \n
              699. ^ Nilsson (1983), p. 10.\n
              700. \n
              701. ^ Haugeland (1985), pp. 112–117.\n
              702. \n
              703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
              704. \n
              705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
              706. \n
              707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
              708. \n
              709. ^ Crevier (1993), p. 125.\n
              710. \n
              711. ^ Langley (2011).\n
              712. \n
              713. ^ Katz (2012).\n
              714. \n
              715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
              716. \n
              717. ^ Pennachin & Goertzel (2007).\n
              718. \n
              719. ^ a b Roberts (2016).\n
              720. \n
              721. ^ Russell & Norvig (2021), p. 986.\n
              722. \n
              723. ^ Chalmers (1995).\n
              724. \n
              725. ^ Dennett (1991).\n
              726. \n
              727. ^ Horst (2005).\n
              728. \n
              729. ^ Searle (1999).\n
              730. \n
              731. ^ Searle (1980), p. 1.\n
              732. \n
              733. ^ Russell & Norvig (2021), p. 9817.\n
              734. \n
              735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
              736. \n
              737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
              738. \n
              739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
              740. \n
              741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
              742. \n
              743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
              744. \n
              745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
              746. \n
              747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
              748. \n
              749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
              750. \n
              751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
              752. \n
              753. ^ Russell & Norvig (2021), p. 1005.\n
              754. \n
              755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
              756. \n
              757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
              758. \n
              759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
              760. \n
              761. ^ McCorduck (2004), pp. 340–400.\n
              762. \n
              763. ^ Buttazzo (2001).\n
              764. \n
              765. ^ Anderson (2008).\n
              766. \n
              767. ^ McCauley (2007).\n
              768. \n
              769. ^ Galvan (1997).\n
              770. \n
              \n

              AI textbooks

              \n

              The two most widely used textbooks in 2023 (see the Open Syllabus):\n

              \n\n

              These were the four of the most widely used AI textbooks in 2008:\n

              \n
              \n\n
              \n

              Other textbooks:\n

              \n\n

              History of AI

              \n
              \n\n
              \n

              Other sources

              \n
              \n\n\n
              \n

              Further reading

              \n
              \n\n
              \n
              \n\n\n\n\n\n\n\n\n\n
              \n
              \n\t\t\t\t\t\n\t\t\t\t
              \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
              \n\t
                \n
                \n\n\n'}'. -[2024-10-27 22:43:33,790] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_swOe2QGejYML33487msP4DyF', type='function')], function_call=None) -[2024-10-27 22:43:33,790] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using twitter. -[2024-10-27 22:43:33,790] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using twitter.'. -[2024-10-27 22:43:37,901] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23trending'}'. -[2024-10-27 22:43:37,902] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23trending', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-10-27 22:43:37,903] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_pMnJde0v8WFG9nt7AUlu3s97', type='function')], function_call=None) -[2024-10-27 22:43:37,904] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_4GStBQ9QEoqAspnXVPoFYCWM', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_swOe2QGejYML33487msP4DyF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_pMnJde0v8WFG9nt7AUlu3s97', type='function')], function_call=None) - -[2024-10-27 22:43:37,905] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_4GStBQ9QEoqAspnXVPoFYCWM', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_swOe2QGejYML33487msP4DyF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_pMnJde0v8WFG9nt7AUlu3s97', type='function')], function_call=None) - -[2024-10-27 22:43:37,906] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_4GStBQ9QEoqAspnXVPoFYCWM', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_swOe2QGejYML33487msP4DyF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_pMnJde0v8WFG9nt7AUlu3s97', type='function')], function_call=None) - -[2024-10-27 22:43:38,298] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-27 22:43:38,298] [INFO] [Engine]: Starting iteration 2 -[2024-10-27 22:43:38,299] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_4GStBQ9QEoqAspnXVPoFYCWM', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_swOe2QGejYML33487msP4DyF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_pMnJde0v8WFG9nt7AUlu3s97', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:43:39,657] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please collect and summarize the information gathered by agents 2, 3, and 4 about the latest trends in AI.', 'agent2': 'Task completed for agent2. No further action required.', 'agent3': 'Task completed for agent3. No further action required.', 'agent4': 'Task completed for agent4. No further action required.'}, 'continue': False} -[2024-10-27 22:43:39,658] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please collect and summarize the information gathered by agents 2, 3, and 4 about the latest trends in AI.', 'agent2': 'Task completed for agent2. No further action required.', 'agent3': 'Task completed for agent3. No further action required.', 'agent4': 'Task completed for agent4. No further action required.'} -[2024-10-27 22:43:39,658] [INFO] [Engine]: Assigning task to agent1: Please collect and summarize the information gathered by agents 2, 3, and 4 about the latest trends in AI. -[2024-10-27 22:43:39,658] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Please collect and summarize the information gathered by agents 2, 3, and 4 about the latest trends in AI.'. -[2024-10-27 22:43:49,049] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Please provide the latest trends in AI. -[2024-10-27 22:43:49,049] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Please provide the latest trends in AI. -[2024-10-27 22:43:49,049] [INFO] [BaseAgent]: Agent 'agent1' called 'communicate_to' with args '{'target_agent_id': 'agent2', 'message': 'Please provide the latest trends in AI.'}'. 
-[2024-10-27 22:43:49,050] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully sent message to agent agent2', 'session_id': 'default_session'}'. -[2024-10-27 22:43:49,050] [DEBUG] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_Efut5cYXbDIkIgfgCgipr0aZ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_WzsD1Av8hXwi6m2uSkcnnKiV', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_RIUsy8lHKNw2J7IX8mg9krCs', type='function')], function_call=None) -[2024-10-27 22:43:49,050] [INFO] [Engine]: Assigning task to agent2: Task completed for agent2. No further action required. -[2024-10-27 22:43:49,050] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Task completed for agent2. No further action required.'. -[2024-10-27 22:44:03,439] [INFO] [BaseAgent]: Agent 'agent2' acted with result 'Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None)'. -[2024-10-27 22:44:03,439] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) -[2024-10-27 22:44:03,439] [INFO] [Engine]: Assigning task to agent3: Task completed for agent3. No further action required. 
-[2024-10-27 22:44:03,440] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Task completed for agent3. No further action required.'. -[2024-10-27 22:44:14,785] [INFO] [BaseAgent]: Agent 'agent3' acted with result 'Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None)'. -[2024-10-27 22:44:14,786] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) -[2024-10-27 22:44:14,786] [INFO] [Engine]: Assigning task to agent4: Task completed for agent4. No further action required. -[2024-10-27 22:44:14,787] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Task completed for agent4. No further action required.'. -[2024-10-27 22:48:15,651] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-10-27 22:48:15,653] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_Efut5cYXbDIkIgfgCgipr0aZ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_WzsD1Av8hXwi6m2uSkcnnKiV', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_RIUsy8lHKNw2J7IX8mg9krCs', type='function')], function_call=None) -- agent2: Message(content='Understood. 
If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) - -[2024-10-27 22:48:15,655] [INFO] [Engine]: Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_Efut5cYXbDIkIgfgCgipr0aZ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_WzsD1Av8hXwi6m2uSkcnnKiV', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_RIUsy8lHKNw2J7IX8mg9krCs', type='function')], function_call=None) -- agent2: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) - -[2024-10-27 22:48:15,656] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_4GStBQ9QEoqAspnXVPoFYCWM', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_swOe2QGejYML33487msP4DyF', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_pMnJde0v8WFG9nt7AUlu3s97', type='function')], function_call=None) - -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_Efut5cYXbDIkIgfgCgipr0aZ', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_WzsD1Av8hXwi6m2uSkcnnKiV', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Please provide the latest trends in AI."}', name='communicate_to'), id='call_RIUsy8lHKNw2J7IX8mg9krCs', type='function')], function_call=None) -- agent2: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. Have a great day!', role='assistant', tool_calls=None, function_call=None) -- agent3: Message(content='Understood. If you have any more tasks or questions in the future, feel free to let me know. 
Have a great day!', role='assistant', tool_calls=None, function_call=None) - -[2024-10-27 22:48:16,010] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-10-27 22:48:16,011] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-10-27 22:48:16,012] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-27 22:48:16,012] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:48:16,013] [INFO] [Evaluator]: Total Token Consumption: 180 -[2024-10-27 22:48:16,013] [INFO] [Evaluator]: Average Tokens per Iteration: 90.0 -[2024-10-27 22:48:16,013] [INFO] [Engine]: Simulation completed. -[2024-10-27 22:48:19,499] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:48:19,500] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:48:19,500] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:48:19,500] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:48:19,500] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:48:19,500] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:48:19,501] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:48:19,501] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:48:19,501] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:48:19,501] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-27 22:48:19,501] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:48:19,501] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:48:19,501] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:48:19,501] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:48:19,502] [INFO] [EnginePlanner]: EnginePlanner initialized. 
-[2024-10-27 22:48:19,502] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:48:19,502] [INFO] [Engine]: Engine starting simulation. -[2024-10-27 22:48:19,502] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:48:19,502] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. If agents are asked, they should be assigned the task of answering. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-27 22:48:20,779] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:48:20,779] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:48:20,779] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-10-27 22:48:20,780] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-10-27 22:48:21,478] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? -[2024-10-27 22:48:21,478] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-10-27 22:48:21,478] [INFO] [BaseAgent]: Agent 'agent2' called 'communicate_to' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google and provide the information?'}'. -[2024-10-27 22:48:21,478] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully sent message to agent agent1', 'session_id': 'default_session'}'. 
-[2024-10-27 22:48:21,478] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_F07x1Yh6U2y9wbG8abglZ7Ri', type='function')], function_call=None) -[2024-10-27 22:48:21,479] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:48:21,479] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-10-27 22:48:22,214] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:48:22,227] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
                \n\t
                \n\t\t
                \n\t\t\t
                \n\n\t\t\n\t\t\t\n\n\n\t\t
                \n\t\t
                \n\t\t\t\n\n\n\t\t\t\n\n\t\t
                \n\t\n\n
                \n\t
                \n\t\t
                \n\t\t\t
                \n\t\t
                \n\t\t
                \n\t\t\t
                \n\t\t
                \n\t\t\t\n\t\t
                \n\t
                \n\t
                \n\t\t\t\t
                \n\t\t\n\t\t\t
                \n\t\t
                \n\t\t
                \n\t\t\t
                \n\t\t\t\t
                \n\t\t\t\t\t\n\t\t\t\t\t

                Artificial intelligence

                \n\t\t\t\t\t\t\t\n
                \n\t\n\t\n\t
                \n\n\t\t
                \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
                \n\n\t
                \n
                \n
                \n\t\t\t\t
                \n\t\t\t\t\t
                \n\t\t\t\t\t\t
                \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                \n\t\t\t\t\t\t
                \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
                \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                \n\t\t\t\t\t
                \n\t\t\t\t
                \n\t\t\t\t
                \n\t\t\t\t\t
                \n\t\t\t\t\t\t\n\t\t\t\t\t\t
                \n\t\t\n\t\t\t\t\t
                \n\t\t\t\t
                \n\t\t\t\t
                \n\t\t\t\t\t
                \n\t\t\t\t\t\t\t
                \n\t\t
                Page semi-protected
                \n\t\t
                \n\n\t\t\t\t\t\t
                From Wikipedia, the free encyclopedia
                \n\t\t\t\t\t
                \n\t\t\t\t\t
                \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
                \n\n

                \n

                \n\n\n\n\n\n\n\n

                Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

                Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

                The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

                Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

                \n\n

                Goals

                \n

                The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

                \n

                Reasoning and problem-solving

                \n

                Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

                Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

                \n

                Knowledge representation

                \n
                An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
                \n

                Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

                A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

                Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

                \n

                Planning and decision-making

                \n

                An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

                In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

                In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

                A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

                Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

                \n

                Learning

                \n

                Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

                There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

                In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

                Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

                \n
                \n

                Natural language processing

                \n

                Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

                Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

                Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

                \n

                Perception

                \n

                Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

                The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61] object tracking,[62] and robotic perception.[63]\n

                \n

                Social intelligence

                \n
                Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
                \n

                Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

                However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

                \n

                General intelligence

                \n

                A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

                \n

                Techniques

                \n

                AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

                \n

                Search and optimization

                \n

                AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

                \n
                \n

                State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

                Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

                Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

                \n
                \n
                Illustration of gradient descent for 3 different starting points; two parameters (represented by the plane coordinates) are adjusted in order to minimize the loss function (the height)

                Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

                Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

                Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

                Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

                \n

                Logic

                \n

                Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

                Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

                Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

                Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

                Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

                Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

                \n

                Probabilistic methods for uncertain reasoning

                \n
                A simple Bayesian network, with the associated conditional probability tables
                \n

                Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

                Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

                Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

                \n
                Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
                \n

                Classifiers and statistical learning methods

                \n

                The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

                There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

                \n

                Artificial neural networks

                \n
                A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
                \n

                An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

                Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

                In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short-term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

                \n
                \n

                Deep learning

                \n
                \n

                Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

                Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

                \n

                GPT

                \n

                Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

                Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

                \n

                Hardware and software

                \n\n

                In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

                The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

                \n

                Applications

                \n

                AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

                Health and medicine

                \n\n

                The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

                For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

                \n

                Games

                \n\n

                Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

                \n

                Mathematics

                \n

                In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

                Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

                When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

                Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

                \n

                Finance

                \n

                Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

                World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

                \n

                Military

                \n\n

                Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

                In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

                \n

                Generative AI

                \n\n
                Vincent van Gogh in watercolour created by generative AI software
                \n

                In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

                In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

                \n

                Agents

                \n

                Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

                \n

                Other industry-specific tasks

                \n

                There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

                AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

                In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

                Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

                \n

                Ethics

                \n\n

                AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

                \n

                Risks and harm

                \n
                \n\n

                Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

                AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

                Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

                AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

                Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

                \n

                Dominance by tech giants

                \n

                The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

                \n

                Substantial power needs and other environmental impacts

                \n\n

                In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

                Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

                A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

                In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

                In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

                \n

                Misinformation

                \n\n

                YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

                In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

                \n

                Algorithmic bias and fairness

                \n\n

                Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

                On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

                COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

                A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

                Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

                Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

                There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

                At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

                \n

                Lack of transparency

                \n\n

                Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[221]\n

                It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

                People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

                DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

                Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

                \n

                Bad actors and weaponized AI

                \n\n

                Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

                A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and they could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

                AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

                There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

                \n

                Technological unemployment

                \n\n

                Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

                In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

                Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

                From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

                \n

                Existential risk

                \n\n

                It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

                First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

                Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

                The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

                In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

                In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

                Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

                \n

                Ethical machines and alignment

                \n\n

                Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

                Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

                Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

                \n

                Open source

                \n

                Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

                \n

                Frameworks

                \n

                Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

                \n
                • Respect the dignity of individual people
                • \n
                • Connect with other people sincerely, openly, and inclusively
                • \n
                • Care for the wellbeing of everyone
                • \n
                • Protect social values, justice, and the public interest
                \n

                Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[284]\n

                Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

                The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

                \n

                Regulation

                \n\n
                AI Safety Summit
                The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
                \n

                The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

                In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

                In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

                \n

                History

                \n\n\n

                The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

                The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

                Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

                In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

                Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

                AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

                Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

                In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

                In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

                \n

                Philosophy

                \n

                Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

                Defining artificial intelligence

                \n\n

                Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

                \n
                The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
                \n

                Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

                McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

                Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

                Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

                \n

                Evaluating approaches to AI

                \n

                No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

                \n

                Symbolic AI and its limits

                \n

                Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

                However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

                The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

                \n

                Neat vs. scruffy

                \n\n

                "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

                \n

                Soft vs. hard computing

                \n\n

                Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

                \n

                Narrow vs. general AI

                \n\n

                AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

                \n

                Machine consciousness, sentience, and mind

                \n\n

                The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

                \n

                Consciousness

                \n\n

                David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

                \n

                Computationalism and functionalism

                \n\n

                Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

                Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

                \n

                AI welfare and rights

                \n

                It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

                In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part to society on their own.[375][376]\n

                Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

                \n

                Future

                \n

                Superintelligence and the singularity

                \n

                A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

                However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

                \n

                Transhumanism

                \n\n

                Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

                Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

                \n

                In fiction

                \n\n
                The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
                \n

                Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

                A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

                Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

                Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

                \n

                See also

                \n\n

                Explanatory notes

                \n
                \n
                  \n
                1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                2. \n
                3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                4. \n
                5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
                6. \n
                7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
                8. \n
                9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
                10. \n
                11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
                12. \n
                13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
                14. \n
                15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
                16. \n
                17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt(1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
                18. \n
                19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
                20. \n
                21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
                22. \n
                23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
                24. \n
                25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[218]\n
                26. \n
                27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
                28. \n
                29. ^ This is the United Nations\' definition, and includes things like land mines as well.[232]\n
                30. \n
                31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
                32. \n
                33. ^ Sometimes called a "robopocalypse"[251]\n
                34. \n
                35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
                36. \n
                37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
                38. \n
                39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
                40. \n
                41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
                42. \n
                43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
                44. \n
                45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
                46. \n
                47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
                48. \n
                49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
                50. \n
                51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
                52. \n
                53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
                54. \n
                55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
                56. \n
                57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Stong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
                58. \n
                \n

                References

                \n
                \n
                  \n
                1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
                2. \n
                3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
                4. \n
                5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
                6. \n
                7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
                  Proposal for the modern version: Pennachin & Goertzel (2007)
                  Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
                  \n
                8. \n
                9. ^ Russell & Norvig (2021, §1.2).\n
                10. \n
                11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
                  The proposal: McCarthy et al. (1955)
                  \n
                12. \n
                13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
                14. \n
                15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
                16. \n
                17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
                18. \n
                19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
                20. \n
                21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
                22. \n
                23. ^ Toews (2023).\n
                24. \n
                25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
                26. \n
                27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
                28. \n
                29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
                30. \n
                31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
                32. \n
                33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
                34. \n
                35. ^ Smoliar & Zhang (1994).\n
                36. \n
                37. ^ Neumann & Möller (2008).\n
                38. \n
                39. ^ Kuperman, Reichley & Bailey (2006).\n
                40. \n
                41. ^ McGarry (2005).\n
                42. \n
                43. ^ Bertini, Del Bimbo & Torniai (2006).\n
                44. \n
                45. ^ Russell & Norvig (2021), pp. 272.\n
                46. \n
                47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
                48. \n
                49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
                50. \n
                51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
                52. \n
                53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
                54. \n
                55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
                56. \n
                57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
                58. \n
                59. ^ Newquist (1994), p. 296.\n
                60. \n
                61. ^ Crevier (1993), pp. 204–208.\n
                62. \n
                63. ^ Russell & Norvig (2021), p. 528.\n
                64. \n
                65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
                66. \n
                67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
                68. \n
                69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
                70. \n
                71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a online planning): Russell & Norvig (2021, Section 11.5).\n
                72. \n
                73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
                74. \n
                75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
                76. \n
                77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
                78. \n
                79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
                80. \n
                81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
                82. \n
                83. ^ Turing (1950).\n
                84. \n
                85. ^ Solomonoff (1956).\n
                86. \n
                87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
                88. \n
                89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
                90. \n
                91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
                92. \n
                93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
                94. \n
                95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
                96. \n
                97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
                98. \n
                99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
                100. \n
                101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
                102. \n
                103. ^ Russell & Norvig (2021), pp. 856–858.\n
                104. \n
                105. ^ Dickson (2022).\n
                106. \n
                107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
                108. \n
                109. ^ Vincent (2019).\n
                110. \n
                111. ^ Russell & Norvig (2021), pp. 875–878.\n
                112. \n
                113. ^ Bushwick (2023).\n
                114. \n
                115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
                116. \n
                117. ^ Russell & Norvig (2021), pp. 849–850.\n
                118. \n
                119. ^ Russell & Norvig (2021), pp. 895–899.\n
                120. \n
                121. ^ Russell & Norvig (2021), pp. 899–901.\n
                122. \n
                123. ^ Challa et al. (2011).\n
                124. \n
                125. ^ Russell & Norvig (2021), pp. 931–938.\n
                126. \n
                127. ^ MIT AIL (2014).\n
                128. \n
                129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
                130. \n
                131. ^ Waddell (2018).\n
                132. \n
                133. ^ Poria et al. (2017).\n
                134. \n
                135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
                136. \n
                137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
                138. \n
                139. ^ Russell & Norvig (2021), sect. 11.2.\n
                140. \n
                141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
                142. \n
                143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
                144. \n
                145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
                146. \n
                147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
                148. \n
                149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
                150. \n
                151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
                152. \n
                153. ^ Merkle & Middendorf (2013).\n
                154. \n
                155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
                156. \n
                157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
                158. \n
                159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
                160. \n
                161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
                162. \n
                163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
                164. \n
                165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
                166. \n
                167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
                168. \n
                169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
                170. \n
                171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
                172. \n
                173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
                174. \n
                175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
                176. \n
                177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
                178. \n
                179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
                180. \n
                181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
                182. \n
                183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
                184. \n
                185. ^ Domingos (2015), chpt. 6.\n
                186. \n
                187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
                188. \n
                189. ^ Domingos (2015), p. 210.\n
                190. \n
                191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
                192. \n
                193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
                194. \n
                195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
                196. \n
                197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
                198. \n
                199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
                200. \n
                201. ^ Non-parametric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
                202. \n
                203. ^ Domingos (2015), p. 152.\n
                204. \n
                205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
                206. \n
                207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
                208. \n
                209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
                210. \n
                211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
                212. \n
                213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
                214. \n
                215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
                216. \n
                217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683)\n
                218. \n
                219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
                220. \n
                221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
                222. \n
                223. ^ Deng & Yu (2014), pp. 199–200.\n
                224. \n
                225. ^ Ciresan, Meier & Schmidhuber (2012).\n
                226. \n
                227. ^ Russell & Norvig (2021), p. 751.\n
                228. \n
                229. ^ a b c Russell & Norvig (2021), p. 17.\n
                230. \n
                231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
                232. \n
                233. ^ a b Schmidhuber (2022), sect. 5.\n
                234. \n
                235. ^ Schmidhuber (2022), sect. 6.\n
                236. \n
                237. ^ a b c Schmidhuber (2022), sect. 7.\n
                238. \n
                239. ^ Schmidhuber (2022), sect. 8.\n
                240. \n
                241. ^ Quoted in Christian (2020, p. 22)\n
                242. \n
                243. ^ Smith (2023).\n
                244. \n
                245. ^ "Explained: Generative AI". 9 November 2023.\n
                246. \n
                247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
                248. \n
                249. ^ Marmouyet (2023).\n
                250. \n
                251. ^ Kobielus (2019).\n
                252. \n
                253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
                254. \n
                255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
                256. \n
                257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                258. \n
                259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
                260. \n
                261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
                262. \n
                263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
                264. \n
                265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
                266. \n
                267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
                268. \n
                269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
                270. \n
                271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                272. \n
                273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
                274. \n
                275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
                276. \n
                277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
                278. \n
                279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
                280. \n
                281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
                282. \n
                283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
                284. \n
                285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
                286. \n
                287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
                288. \n
                289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
                290. \n
                291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
                292. \n
                293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
                294. \n
                295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
                296. \n
                297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
                298. \n
                299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
                300. \n
                301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
                302. \n
                303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
                304. \n
                305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
                306. \n
                307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
                308. \n
                309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
                310. \n
                311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
                312. \n
                313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
                314. \n
                315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
                316. \n
                317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
                318. \n
                319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
                320. \n
                321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
                322. \n
                323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
                324. \n
                325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
                326. \n
                327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
                328. \n
                329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                330. \n
                331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
                332. \n
                333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
                334. \n
                335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
                336. \n
                337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
                338. \n
                339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                340. \n
                341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
                342. \n
                343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                344. \n
                345. ^ Simonite (2016).\n
                346. \n
                347. ^ Russell & Norvig (2021), p. 987.\n
                348. \n
                349. ^ Laskowski (2023).\n
                350. \n
                351. ^ GAO (2022).\n
                352. \n
                353. ^ Valinsky (2019).\n
                354. \n
                355. ^ Russell & Norvig (2021), p. 991.\n
                356. \n
                357. ^ Russell & Norvig (2021), pp. 991–992.\n
                358. \n
                359. ^ Christian (2020), p. 63.\n
                360. \n
                361. ^ Vincent (2022).\n
                362. \n
                363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
                364. \n
                365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
                366. \n
                367. ^ Reisner (2023).\n
                368. \n
                369. ^ Alter & Harris (2023).\n
                370. \n
                371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
                372. \n
                373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
                374. \n
                375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
                376. \n
                377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
                378. \n
                379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
                380. \n
                381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
                382. \n
                383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
                384. \n
                385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
                386. \n
                387. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
                388. \n
                389. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
                390. \n
                391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
                392. \n
                393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                394. \n
                395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
                396. \n
                397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                398. \n
                399. ^ Nicas (2018).\n
                400. \n
                401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
                402. \n
                403. ^ Williams (2023).\n
                404. \n
                405. ^ Taylor & Hern (2023).\n
                406. \n
                407. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
                408. \n
                409. ^ a b Rose (2023).\n
                410. \n
                411. ^ CNA (2019).\n
                412. \n
                413. ^ Goffrey (2008), p. 17.\n
                414. \n
                415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
                416. \n
                417. ^ Christian (2020), p. 25.\n
                418. \n
                419. ^ a b Russell & Norvig (2021), p. 995.\n
                420. \n
                421. ^ Grant & Hill (2023).\n
                422. \n
                423. ^ Larson & Angwin (2016).\n
                424. \n
                425. ^ Christian (2020), p. 67–70.\n
                426. \n
                427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
                428. \n
                429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
                430. \n
                431. ^ Quoted in Christian (2020, p. 65).\n
                432. \n
                433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
                434. \n
                435. ^ Quoted in Christian (2020, p. 80)\n
                436. \n
                437. ^ Dockrill (2022).\n
                438. \n
                439. ^ Sample (2017).\n
                440. \n
                441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
                442. \n
                443. ^ Christian (2020), p. 110.\n
                444. \n
                445. ^ Christian (2020), pp. 88–91.\n
                446. \n
                447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
                448. \n
                449. ^ Christian (2020), p. 91.\n
                450. \n
                451. ^ Christian (2020), p. 83.\n
                452. \n
                453. ^ Verma (2021).\n
                454. \n
                455. ^ Rothman (2020).\n
                456. \n
                457. ^ Christian (2020), pp. 105–108.\n
                458. \n
                459. ^ Christian (2020), pp. 108–112.\n
                460. \n
                461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
                462. \n
                463. ^ Russell & Norvig (2021), p. 989.\n
                464. \n
                465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
                466. \n
                467. ^ Russell & Norvig (2021), p. 988.\n
                468. \n
                469. ^ Robitzski (2018); Sainato (2015)\n
                470. \n
                471. ^ Harari (2018).\n
                472. \n
                473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
                474. \n
                475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
                476. \n
                477. ^ Urbina et al. (2022).\n
                478. \n
                479. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
                480. \n
                481. ^ Ford & Colvin (2015); McGaughey (2022)\n
                482. \n
                483. ^ IGM Chicago (2017).\n
                484. \n
                485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
                486. \n
                487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
                488. \n
                489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
                490. \n
                491. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
                492. \n
                493. ^ Morgenstern (2015).\n
                494. \n
                495. ^ Mahdawi (2017); Thompson (2014)\n
                496. \n
                497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
                498. \n
                499. ^ Cellan-Jones (2014).\n
                500. \n
                501. ^ Russell & Norvig (2021), p. 1001.\n
                502. \n
                503. ^ Bostrom (2014).\n
                504. \n
                505. ^ Russell (2019).\n
                506. \n
                507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
                508. \n
                509. ^ Harari (2023).\n
                510. \n
                511. ^ Müller & Bostrom (2014).\n
                512. \n
                513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
                514. \n
                515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
                516. \n
                517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
                518. \n
                519. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
                520. \n
                521. ^ Valance (2023).\n
                522. \n
                523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
                524. \n
                525. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                526. \n
                527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                528. \n
                529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
                530. \n
                531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
                532. \n
                533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
                534. \n
                535. ^ a b Christian (2020), pp. 67, 73.\n
                536. \n
                537. ^ Yudkowsky (2008).\n
                538. \n
                539. ^ a b Anderson & Anderson (2011).\n
                540. \n
                541. ^ AAAI (2014).\n
                542. \n
                543. ^ Wallach (2010).\n
                544. \n
                545. ^ Russell (2019), p. 173.\n
                546. \n
                547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
                548. \n
                549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
                550. \n
                551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
                552. \n
                553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
                554. \n
                555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                556. \n
                557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
                558. \n
                559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                560. \n
                561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                562. \n
                563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                564. \n
                565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
                566. \n
                567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                568. \n
                569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                570. \n
                571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
                572. \n
                573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
                574. \n\n
                575. ^ a b Vincent (2023).\n
                576. \n
                577. ^ Stanford University (2023).\n
                578. \n
                579. ^ a b c d UNESCO (2021).\n
                580. \n
                581. ^ Kissinger (2021).\n
                582. \n
                583. ^ Altman, Brockman & Sutskever (2023).\n
                584. \n
                585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
                586. \n
                587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
                588. \n
                589. ^ Edwards (2023).\n
                590. \n
                591. ^ Kasperowicz (2023).\n
                592. \n
                593. ^ Fox News (2023).\n
                594. \n
                595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
                596. \n
                597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
                598. \n
                599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
                600. \n
                601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
                602. \n
                603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
                604. \n
                605. ^ a b Russell & Norvig 2021, p. 9.\n
                606. \n
                607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
                608. \n
                609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                610. \n
                611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
                612. \n
                613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
                614. \n
                615. ^ Crevier (1993), pp. 47–49.\n
                616. \n
                617. ^ Russell & Norvig (2003), p. 17.\n
                618. \n
                619. ^ Russell & Norvig (2003), p. 18.\n
                620. \n
                621. ^ Newquist (1994), pp. 86–86.\n
                622. \n
                623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
                624. \n
                625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
                626. \n
                627. ^ Russell & Norvig (2021), p. 21.\n
                628. \n
                629. ^ Lighthill (1973).\n
                630. \n
                631. ^ NRC 1999, pp. 212–213.\n
                632. \n
                633. ^ Russell & Norvig (2021), p. 22.\n
                634. \n
                635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
                636. \n
                637. ^ Russell & Norvig (2021), p. 24.\n
                638. \n
                639. ^ Nilsson (1998), p. 7.\n
                640. \n
                641. ^ McCorduck (2004), pp. 454–462.\n
                642. \n
                643. ^ Moravec (1988).\n
                644. \n
                645. ^ a b Brooks (1990).\n
                646. \n
                647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
                648. \n
                649. ^ Russell & Norvig (2021), p. 25.\n
                650. \n
                651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
                652. \n
                653. ^ Russell & Norvig (2021), p. 26.\n
                654. \n
                655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
                656. \n
                657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
                658. \n
                659. ^ Wong (2023).\n
                660. \n
                661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
                662. \n
                663. ^ a b c Clark (2015b).\n
                664. \n
                665. ^ Big data: Russell & Norvig (2021, p. 26)\n
                666. \n
                667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
                668. \n
                669. ^ DiFeliciantonio (2023).\n
                670. \n
                671. ^ Goswami (2023).\n
                672. \n
                673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                674. \n
                675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
                676. \n
                677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
                678. \n
                679. ^ a b Turing (1950), p. 1.\n
                680. \n
                681. ^ Turing (1950), Under "The Argument from Consciousness".\n
                682. \n
                683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
                684. \n
                685. ^ Russell & Norvig (2021), p. 3.\n
                686. \n
                687. ^ Maker (2006).\n
                688. \n
                689. ^ McCarthy (1999).\n
                690. \n
                691. ^ Minsky (1986).\n
                692. \n
                693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
                694. \n
                695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
                696. \n
                697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
                698. \n
                699. ^ Nilsson (1983), p. 10.\n
                700. \n
                701. ^ Haugeland (1985), pp. 112–117.\n
                702. \n
                703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
                704. \n
                705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
                706. \n
                707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
                708. \n
                709. ^ Crevier (1993), p. 125.\n
                710. \n
                711. ^ Langley (2011).\n
                712. \n
                713. ^ Katz (2012).\n
                714. \n
                715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
                716. \n
                717. ^ Pennachin & Goertzel (2007).\n
                718. \n
                719. ^ a b Roberts (2016).\n
                720. \n
                721. ^ Russell & Norvig (2021), p. 986.\n
                722. \n
                723. ^ Chalmers (1995).\n
                724. \n
                725. ^ Dennett (1991).\n
                726. \n
                727. ^ Horst (2005).\n
                728. \n
                729. ^ Searle (1999).\n
                730. \n
                731. ^ Searle (1980), p. 1.\n
                732. \n
                733. ^ Russell & Norvig (2021), p. 9817.\n
                734. \n
                735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
                736. \n
                737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
                738. \n
                739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
                740. \n
                741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
                742. \n
                743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
                744. \n
                745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                746. \n
                747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                748. \n
                749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
                750. \n
                751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
                752. \n
                753. ^ Russell & Norvig (2021), p. 1005.\n
                754. \n
                755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
                756. \n
                757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
                758. \n
                759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
                760. \n
                761. ^ McCorduck (2004), pp. 340–400.\n
                762. \n
                763. ^ Buttazzo (2001).\n
                764. \n
                765. ^ Anderson (2008).\n
                766. \n
                767. ^ McCauley (2007).\n
                768. \n
                769. ^ Galvan (1997).\n
                770. \n
                \n

                AI textbooks

                \n

                The two most widely used textbooks in 2023 (see the Open Syllabus):\n

                \n\n

                These were the four of the most widely used AI textbooks in 2008:\n

                \n
                \n\n
                \n

                Other textbooks:\n

                \n\n

                History of AI

                \n
                \n\n
                \n

                Other sources

                \n
                \n\n\n
                \n

                Further reading

                \n
                \n\n
                \n
                \n\n\n\n\n\n\n\n\n\n
                \n
                \n\t\t\t\t\t\n\t\t\t\t
                \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
                \n\t
                  \n
                  \n\n\n'}'. -[2024-10-27 22:48:22,401] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_fHTq93t35aAVv5eACwoYPaPP', type='function')], function_call=None) -[2024-10-27 22:48:22,402] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-10-27 22:48:22,402] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-10-27 22:48:23,736] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=%23AI%20%23trending'}'. -[2024-10-27 22:48:23,736] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=%23AI%20%23trending', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-10-27 22:48:23,736] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_EOrnYwL1zWjLqNFVUoChHbjf', type='function')], function_call=None) -[2024-10-27 22:48:23,736] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_F07x1Yh6U2y9wbG8abglZ7Ri', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_fHTq93t35aAVv5eACwoYPaPP', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_EOrnYwL1zWjLqNFVUoChHbjf', type='function')], function_call=None) - -[2024-10-27 22:48:23,737] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_F07x1Yh6U2y9wbG8abglZ7Ri', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', 
name='fetch_webpage'), id='call_fHTq93t35aAVv5eACwoYPaPP', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_EOrnYwL1zWjLqNFVUoChHbjf', type='function')], function_call=None) - -[2024-10-27 22:48:23,737] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_F07x1Yh6U2y9wbG8abglZ7Ri', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_fHTq93t35aAVv5eACwoYPaPP', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23trending"}', name='fetch_webpage'), id='call_EOrnYwL1zWjLqNFVUoChHbjf', type='function')], function_call=None) - -[2024-10-27 22:48:24,082] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-10-27 22:48:24,083] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-10-27 22:48:24,084] [INFO] [Engine]: Engine simulation loop completed. 
-[2024-10-27 22:48:24,084] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:48:24,085] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-10-27 22:48:24,085] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-10-27 22:48:24,085] [INFO] [Engine]: Simulation completed. -[2024-10-27 22:48:44,072] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:48:44,073] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:48:44,073] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:48:44,073] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:48:44,073] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:48:44,073] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:48:44,073] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:48:44,073] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:48:44,074] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:48:44,074] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-27 22:48:44,074] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:48:44,074] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:48:44,074] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:48:44,074] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:48:44,075] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-27 22:48:44,075] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:48:44,075] [INFO] [Engine]: Engine starting simulation. 
-[2024-10-27 22:48:44,075] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:48:44,075] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. If agents are asked, they should be assigned the task of answering. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-27 22:48:45,174] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:48:45,174] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:48:45,174] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-10-27 22:48:45,175] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-10-27 22:48:46,120] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-10-27 22:48:46,126] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

                  Accessibility Links

                  About 518,000,000 results (0.28 seconds) 

                  Ads

                  Sponsored
                  The AI market is quickly increasing due to the rapid pace of technological change. See how we are helping executives understand how potential disruptors will impact growth.
                  Sponsored
                  View the report for AI use cases, AI predictions, tips for overcoming the risks, and more. AI...
                  Sponsored
                  Gain insights on sustainable computing and generative AI. Discover AI trends and...
                  Sponsored
                  Explore the Future: AI Stats & Trends in 2024. Stay Ahead of the Evolution. Get Ahead with...

                  Ads

                  Sponsored
                  Artificial intelligence and related technologies are advancing at a rapid pace. Explore our work on AI use, policy, ethics, and development. View Events. Browse Publications.
                  Google apps
                  '}'. -[2024-10-27 22:48:46,194] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_X2WZjFmudUmMzICf1BB5MHsR', type='function')], function_call=None) -[2024-10-27 22:48:46,194] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:48:46,195] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-10-27 22:48:47,378] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:48:47,387] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
                  \n\t
                  \n\t\t
                  \n\t\t\t
                  \n\n\t\t\n\t\t\t\n\n\n\t\t
                  \n\t\t
                  \n\t\t\t\n\n\n\t\t\t\n\n\t\t
                  \n\t\n\n
                  \n\t
                  \n\t\t
                  \n\t\t\t
                  \n\t\t
                  \n\t\t
                  \n\t\t\t
                  \n\t\t
                  \n\t\t\t\n\t\t
                  \n\t
                  \n\t
                  \n\t\t\t\t
                  \n\t\t\n\t\t\t
                  \n\t\t
                  \n\t\t
                  \n\t\t\t
                  \n\t\t\t\t
                  \n\t\t\t\t\t\n\t\t\t\t\t

                  Artificial intelligence

                  \n\t\t\t\t\t\t\t\n
                  \n\t\n\t\n\t
                  \n\n\t\t
                  \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
                  \n\n\t
                  \n
                  \n
                  \n\t\t\t\t
                  \n\t\t\t\t\t
                  \n\t\t\t\t\t\t
                  \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                  \n\t\t\t\t\t\t
                  \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
                  \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                  \n\t\t\t\t\t
                  \n\t\t\t\t
                  \n\t\t\t\t
                  \n\t\t\t\t\t
                  \n\t\t\t\t\t\t\n\t\t\t\t\t\t
                  \n\t\t\n\t\t\t\t\t
                  \n\t\t\t\t
                  \n\t\t\t\t
                  \n\t\t\t\t\t
                  \n\t\t\t\t\t\t\t
                  \n\t\t
                  Page semi-protected
                  \n\t\t
                  \n\n\t\t\t\t\t\t
                  From Wikipedia, the free encyclopedia
                  \n\t\t\t\t\t
                  \n\t\t\t\t\t
                  \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
                  \n\n

                  \n

                  \n\n\n\n\n\n\n\n

                  Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

                  Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it's not labeled AI anymore."[2][3]\n

                  The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field's long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

                  Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

                  \n\n

                  Goals

                  \n

                  The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

                  \n

                  Reasoning and problem-solving

                  \n

                  Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

                  Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

                  \n

                  Knowledge representation

                  \n
                  An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
                  \n

                  Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

                  A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

                  Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

                  \n

                  Planning and decision-making

                  \n

                  An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

                  In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

                  In some problems, the agent's preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

                  A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

                  Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

                  \n

                  Learning

                  \n

                  Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

                  There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

                  In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

                  Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

                  \n
                  \n

                  Natural language processing

                  \n

                  Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

                  Early work, based on Noam Chomsky's generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

                  Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

                  \n

                  Perception

                  \n

                  Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

                  The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

                  \n

                  Social intelligence

                  \n
                  Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
                  \n

                  Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

                  However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

                  \n

                  General intelligence

                  \n

                  A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

                  \n

                  Techniques

                  \n

                  AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

                  \n

                  Search and optimization

                  \n

                  AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

                  \n
                  \n

                  State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

                  Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

                  Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

                  \n
                  \n
                  Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

                  Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

                  Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

                  Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

                  Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

                  \n

                  Logic

                  \n

                  Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

                  Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

                  Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

                  Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

                  Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

                  Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

                  \n

                  Probabilistic methods for uncertain reasoning

                  \n
                  A simple Bayesian network, with the associated conditional probability tables
                  \n

                  Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

                  Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

                  Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

                  \n
                  Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
                  \n

                  Classifiers and statistical learning methods

                  \n

                  The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

                  There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

                  \n

                  Artificial neural networks

                  \n
                  A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
                  \n

                  An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

                  Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

                  In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

                  \n
                  \n

                  Deep learning

                  \n
                  \n

                  Deep learning[110] uses several layers of neurons between the network's inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

                  Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

                  \n

                  GPT

                  \n

                  Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

                  Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

                  \n

                  Hardware and software

                  \n\n

                  In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

                  The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore's law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

                  \n

                  Applications

                  \n

                  AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple's Face ID or Microsoft's DeepFace and Google's FaceNet) and image labeling (used by Facebook, Apple's iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

                  Health and medicine

                  \n\n

                  The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

                  For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson's disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson's disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

                  \n

                  Games

                  \n\n

                  Game playing programs have been used since the 1950s to demonstrate and test AI's most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM's question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind's AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world's best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

                  \n

                  Mathematics

                  \n

                  In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

                  Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

                  When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

                  Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

                  \n

                  Finance

                  \n

                  Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

                  World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

                  \n

                  Military

                  \n\n

                  Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

                  In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

                  \n

                  Generative AI

                  \n\n
                  Vincent van Gogh in watercolour created by generative AI software
                  \n

                  In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

                  In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

                  \n

                  Agents

                  \n

                  Artificial intelligent (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

                  \n

                  Other industry-specific tasks

                  \n

                  There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

                  AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

                  In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

                  Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

                  \n

                  Ethics

                  \n\n

                  AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of DeepMind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

                  \n

                  Risks and harm

                  \n
                  \n\n

                  Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

                  AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

                  Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

                  AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

                  Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

                  \n

                  Dominance by tech giants

                  \n

                  The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

                  \n

                  Substantial power needs and other environmental impacts

                  \n\n

                  In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

                  Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

                  A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

                  In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

                  In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

                  \n

                  Misinformation

                  \n\n

                  YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

                  In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

                  \n

                  Algorithmic bias and fairness

                  \n\n

                  Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

                  On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

                  COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and underestimated the chance that a white person would re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

                  A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

                  Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

                  Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

                  There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

                  At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

                  \n

                  Lack of transparency

                  \n\n

                  Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. However, some popular explainability techniques exist.[221]\n

                  It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

                  People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

                  DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

                  Several approaches aim to address the transparency problem. SHAP enables to visualise the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

                  \n

                  Bad actors and weaponized AI

                  \n\n

                  Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

                  A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

                  AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

                  There are many other ways in which AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

                  \n

                  Technological unemployment

                  \n\n

                  Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

                  In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

                  Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

                  From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

                  \n

                  Existential risk

                  \n\n

                  It has been argued that AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

                  First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

                  Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

                  The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

                  In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

                  In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

                  Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

                  \n

                  Ethical machines and alignment

                  \n\n

                  Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

                  Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

                  Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

                  \n

                  Open source

                  \n

                  Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

                  \n

                  Frameworks

                  \n

                  Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

                  \n
                  • Respect the dignity of individual people
                  • \n
                  • Connect with other people sincerely, openly, and inclusively
                  • \n
                  • Care for the wellbeing of everyone
                  • \n
                  • Protect social values, justice, and the public interest
                  \n

                  Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[284]\n

                  Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

                  The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

                  \n

                  Regulation

                  \n\n
                  AI Safety Summit
                  The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
                  \n

                  The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

                  In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

                  In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

                  \n

                  History

                  \n\n\n

                  The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

                  The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

                  Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

                  In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

                  Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

                  AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

                  Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

                  In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

                  In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

                  \n

                  Philosophy

                  \n

                  Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

                  Defining artificial intelligence

                  \n\n

                  Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

                  \n
                  The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
                  \n

                  Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

                  McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

                  Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

                  Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

                  \n

                  Evaluating approaches to AI

                  \n

                  No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

                  \n

                  Symbolic AI and its limits

                  \n

                  Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

                  However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

                  The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

                  \n

                  Neat vs. scruffy

                  \n\n

                  "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

                  \n

                  Soft vs. hard computing

                  \n\n

                  Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

                  \n

                  Narrow vs. general AI

                  \n\n

                  AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

                  \n

                  Machine consciousness, sentience, and mind

                  \n\n

                  The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

                  \n

                  Consciousness

                  \n\n

                  David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

                  \n

                  Computationalism and functionalism

                  \n\n

                  Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

                  Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

                  \n

                  AI welfare and rights

                  \n

                  It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

                  In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[375][376]\n

                  Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

                  \n

                  Future

                  \n

                  Superintelligence and the singularity

                  \n

                  A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

                  However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

                  \n

                  Transhumanism

                  \n\n

                  Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

                  Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

                  \n

                  In fiction

                  \n\n
                  The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
                  \n

                  Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

                  A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

                  Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

                  Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

                  \n

                  See also

                  \n\n

                  Explanatory notes

                  \n
                  \n
                    \n
                  1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                  2. \n
                  3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                  4. \n
                  5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
                  6. \n
                  7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
                  8. \n
                  9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
                  10. \n
                  11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
                  12. \n
                  13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
                  14. \n
                  15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
                  16. \n
                  17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
                  18. \n
                  19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
                  20. \n
                  21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
                  22. \n
                  23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
                  24. \n
                  25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you're trying to design interventions and mechanisms that change the world."[218]\n
                  26. \n
                  27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
                  28. \n
                  29. ^ This is the United Nations' definition, and includes things like land mines as well.[232]\n
                  30. \n
                  31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
                  32. \n
                  33. ^ Sometimes called a "robopocalypse"[251]\n
                  34. \n
                  35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
                  36. \n
                  37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
                  38. \n
                  39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
                  40. \n
                  41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
                  42. \n
                  43. ^ \nThe programs described are Arthur Samuel's checkers program for the IBM 701, Daniel Bobrow's STUDENT, Newell and Simon's Logic Theorist and Terry Winograd's SHRDLU.\n
                  44. \n
                  45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
                  46. \n
                  47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
                  48. \n
                  49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
                  50. \n
                  51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
                  52. \n
                  53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
                  54. \n
                  55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus's comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
                  56. \n
                  57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle's original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
                  58. \n
                  \n

                  References

                  \n
                  \n
                    \n
                  1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
                  2. \n
                  3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
                  4. \n
                  5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who's the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
                  6. \n
                  7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
                    Proposal for the modern version: Pennachin & Goertzel (2007)
                    Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
                    \n
                  8. \n
                  9. ^ Russell & Norvig (2021, §1.2).\n
                  10. \n
                  11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
                    The proposal: McCarthy et al. (1955)
                    \n
                  12. \n
                  13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
                  14. \n
                  15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
                  16. \n
                  17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
                  18. \n
                  19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
                  20. \n
                  21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
                  22. \n
                  23. ^ Toews (2023).\n
                  24. \n
                  25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
                  26. \n
                  27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
                  28. \n
                  29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
                  30. \n
                  31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
                  32. \n
                  33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
                  34. \n
                  35. ^ Smoliar & Zhang (1994).\n
                  36. \n
                  37. ^ Neumann & Möller (2008).\n
                  38. \n
                  39. ^ Kuperman, Reichley & Bailey (2006).\n
                  40. \n
                  41. ^ McGarry (2005).\n
                  42. \n
                  43. ^ Bertini, Del Bimbo & Torniai (2006).\n
                  44. \n
                  45. ^ Russell & Norvig (2021), pp. 272.\n
                  46. \n
                  47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
                  48. \n
                  49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
                  50. \n
                  51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
                  52. \n
                  53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
                  54. \n
                  55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
                  56. \n
                  57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
                  58. \n
                  59. ^ Newquist (1994), p. 296.\n
                  60. \n
                  61. ^ Crevier (1993), pp. 204–208.\n
                  62. \n
                  63. ^ Russell & Norvig (2021), p. 528.\n
                  64. \n
                  65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
                  66. \n
                  67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
                  68. \n
                  69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
                  70. \n
                  71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
                  72. \n
                  73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
                  74. \n
                  75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
                  76. \n
                  77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
                  78. \n
                  79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
                  80. \n
                  81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
                  82. \n
                  83. ^ Turing (1950).\n
                  84. \n
                  85. ^ Solomonoff (1956).\n
                  86. \n
                  87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
                  88. \n
                  89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
                  90. \n
                  91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
                  92. \n
                  93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
                  94. \n
                  95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
                  96. \n
                  97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
                  98. \n
                  99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
                  100. \n
                  101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
                  102. \n
                  103. ^ Russell & Norvig (2021), pp. 856–858.\n
                  104. \n
                  105. ^ Dickson (2022).\n
                  106. \n
                  107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
                  108. \n
                  109. ^ Vincent (2019).\n
                  110. \n
                  111. ^ Russell & Norvig (2021), pp. 875–878.\n
                  112. \n
                  113. ^ Bushwick (2023).\n
                  114. \n
                  115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
                  116. \n
                  117. ^ Russell & Norvig (2021), pp. 849–850.\n
                  118. \n
                  119. ^ Russell & Norvig (2021), pp. 895–899.\n
                  120. \n
                  121. ^ Russell & Norvig (2021), pp. 899–901.\n
                  122. \n
                  123. ^ Challa et al. (2011).\n
                  124. \n
                  125. ^ Russell & Norvig (2021), pp. 931–938.\n
                  126. \n
                  127. ^ MIT AIL (2014).\n
                  128. \n
                  129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
                  130. \n
                  131. ^ Waddell (2018).\n
                  132. \n
                  133. ^ Poria et al. (2017).\n
                  134. \n
                  135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
                  136. \n
                  137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
                  138. \n
                  139. ^ Russell & Norvig (2021), sect. 11.2.\n
                  140. \n
                  141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
                  142. \n
                  143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
                  144. \n
                  145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
                  146. \n
                  147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
                  148. \n
                  149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
                  150. \n
                  151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
                  152. \n
                  153. ^ Merkle & Middendorf (2013).\n
                  154. \n
                  155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
                  156. \n
                  157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
                  158. \n
                  159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
                  160. \n
                  161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
                  162. \n
                  163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
                  164. \n
                  165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
                  166. \n
                  167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
                  168. \n
                  169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
                  170. \n
                  171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
                  172. \n
                  173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
                  174. \n
                  175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
                  176. \n
                  177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
                  178. \n
                  179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
                  180. \n
                  181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
                  182. \n
                  183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
                  184. \n
                  185. ^ Domingos (2015), chpt. 6.\n
                  186. \n
                  187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
                  188. \n
                  189. ^ Domingos (2015), p. 210.\n
                  190. \n
                  191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
                  192. \n
                  193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
                  194. \n
                  195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
                  196. \n
                  197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
                  198. \n
                  199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
                  200. \n
                  201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
                  202. \n
                  203. ^ Domingos (2015), p. 152.\n
                  204. \n
                  205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
                  206. \n
                  207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
                  208. \n
                  209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
                  210. \n
                  211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
                  212. \n
                  213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
                  214. \n
                  215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
                  216. \n
                  217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683)\n
                  218. \n
                  219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
                  220. \n
                  221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
                  222. \n
                  223. ^ Deng & Yu (2014), pp. 199–200.\n
                  224. \n
                  225. ^ Ciresan, Meier & Schmidhuber (2012).\n
                  226. \n
                  227. ^ Russell & Norvig (2021), p. 751.\n
                  228. \n
                  229. ^ a b c Russell & Norvig (2021), p. 17.\n
                  230. \n
                  231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
                  232. \n
                  233. ^ a b Schmidhuber (2022), sect. 5.\n
                  234. \n
                  235. ^ Schmidhuber (2022), sect. 6.\n
                  236. \n
                  237. ^ a b c Schmidhuber (2022), sect. 7.\n
                  238. \n
                  239. ^ Schmidhuber (2022), sect. 8.\n
                  240. \n
                  241. ^ Quoted in Christian (2020, p. 22)\n
                  242. \n
                  243. ^ Smith (2023).\n
                  244. \n
                  245. ^ "Explained: Generative AI". 9 November 2023.\n
                  246. \n
                  247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
                  248. \n
                  249. ^ Marmouyet (2023).\n
                  250. \n
                  251. ^ Kobielus (2019).\n
                  252. \n
                  253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
                  254. \n
                  255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
                  256. \n
                  257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see 'gigantic' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                  258. \n
                  259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
                  260. \n
                  261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
                  262. \n
                  263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
                  264. \n
                  265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
                  266. \n
                  267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
                  268. \n
                  269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
                  270. \n
                  271. ^ "AI speeds up drug design for Parkinson's ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  272. \n
                  273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
                  274. \n
                  275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
                  276. \n
                  277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
                  278. \n
                  279. ^ Markoff, John (16 February 2011). "Computer Wins on 'Jeopardy!': Trivial, It's Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
                  280. \n
                  281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
                  282. \n
                  283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
                  284. \n
                  285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
                  286. \n
                  287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in 'fiendishly complex' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
                  288. \n
                  289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
                  290. \n
                  291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
                  292. \n
                  293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
                  294. \n
                  295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
                  296. \n
                  297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
                  298. \n
                  299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
                  300. \n
                  301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
                  302. \n
                  303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
                  304. \n
                  305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
                  306. \n
                  307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
                  308. \n
                  309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
                  310. \n
                  311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
                  312. \n
                  313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
                  314. \n
                  315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
                  316. \n
                  317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
                  318. \n
                  319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
                  320. \n
                  321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
                  322. \n
                  323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
                  324. \n
                  325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
                  326. \n
                  327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
                  328. \n
                  329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  330. \n
                  331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
                  332. \n
                  333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
                  334. \n
                  335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
                  336. \n
                  337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
                  338. \n
                  339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  340. \n
                  341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
                  342. \n
                  343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  344. \n
                  345. ^ Simonite (2016).\n
                  346. \n
                  347. ^ Russell & Norvig (2021), p. 987.\n
                  348. \n
                  349. ^ Laskowski (2023).\n
                  350. \n
                  351. ^ GAO (2022).\n
                  352. \n
                  353. ^ Valinsky (2019).\n
                  354. \n
                  355. ^ Russell & Norvig (2021), p. 991.\n
                  356. \n
                  357. ^ Russell & Norvig (2021), pp. 991–992.\n
                  358. \n
                  359. ^ Christian (2020), p. 63.\n
                  360. \n
                  361. ^ Vincent (2022).\n
                  362. \n
                  363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
                  364. \n
                  365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
                  366. \n
                  367. ^ Reisner (2023).\n
                  368. \n
                  369. ^ Alter & Harris (2023).\n
                  370. \n
                  371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
                  372. \n
                  373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
                  374. \n
                  375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
                  376. \n
                  377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
                  378. \n
                  379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
                  380. \n
                  381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
                  382. \n
                  383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
                  384. \n
                  385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
                  386. \n
                  387. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
                  388. \n
                  389. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
                  390. \n
                  391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
                  392. \n
                  393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  394. \n
                  395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
                  396. \n
                  397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  398. \n
                  399. ^ Nicas (2018).\n
                  400. \n
                  401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
                  402. \n
                  403. ^ Williams (2023).\n
                  404. \n
                  405. ^ Taylor & Hern (2023).\n
                  406. \n
                  407. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
                  408. \n
                  409. ^ a b Rose (2023).\n
                  410. \n
                  411. ^ CNA (2019).\n
                  412. \n
                  413. ^ Goffrey (2008), p. 17.\n
                  414. \n
                  415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
                  416. \n
                  417. ^ Christian (2020), p. 25.\n
                  418. \n
                  419. ^ a b Russell & Norvig (2021), p. 995.\n
                  420. \n
                  421. ^ Grant & Hill (2023).\n
                  422. \n
                  423. ^ Larson & Angwin (2016).\n
                  424. \n
                  425. ^ Christian (2020), pp. 67–70.\n
                  426. \n
                  427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
                  428. \n
                  429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
                  430. \n
                  431. ^ Quoted in Christian (2020, p. 65).\n
                  432. \n
                  433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
                  434. \n
                  435. ^ Quoted in Christian (2020, p. 80)\n
                  436. \n
                  437. ^ Dockrill (2022).\n
                  438. \n
                  439. ^ Sample (2017).\n
                  440. \n
                  441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
                  442. \n
                  443. ^ Christian (2020), p. 110.\n
                  444. \n
                  445. ^ Christian (2020), pp. 88–91.\n
                  446. \n
                  447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
                  448. \n
                  449. ^ Christian (2020), p. 91.\n
                  450. \n
                  451. ^ Christian (2020), p. 83.\n
                  452. \n
                  453. ^ Verma (2021).\n
                  454. \n
                  455. ^ Rothman (2020).\n
                  456. \n
                  457. ^ Christian (2020), pp. 105–108.\n
                  458. \n
                  459. ^ Christian (2020), pp. 108–112.\n
                  460. \n
                  461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
                  462. \n
                  463. ^ Russell & Norvig (2021), p. 989.\n
                  464. \n
                  465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
                  466. \n
                  467. ^ Russell & Norvig (2021), p. 988.\n
                  468. \n
                  469. ^ Robitzski (2018); Sainato (2015)\n
                  470. \n
                  471. ^ Harari (2018).\n
                  472. \n
                  473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
                  474. \n
                  475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
                  476. \n
                  477. ^ Urbina et al. (2022).\n
                  478. \n
                  479. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
                  480. \n
                  481. ^ Ford & Colvin (2015); McGaughey (2022)\n
                  482. \n
                  483. ^ IGM Chicago (2017).\n
                  484. \n
                  485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
                  486. \n
                  487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
                  488. \n
                  489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
                  490. \n
                  491. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
                  492. \n
                  493. ^ Morgenstern (2015).\n
                  494. \n
                  495. ^ Mahdawi (2017); Thompson (2014)\n
                  496. \n
                  497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
                  498. \n
                  499. ^ Cellan-Jones (2014).\n
                  500. \n
                  501. ^ Russell & Norvig 2021, p. 1001.\n
                  502. \n
                  503. ^ Bostrom (2014).\n
                  504. \n
                  505. ^ Russell (2019).\n
                  506. \n
                  507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
                  508. \n
                  509. ^ Harari (2023).\n
                  510. \n
                  511. ^ Müller & Bostrom (2014).\n
                  512. \n
                  513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
                  514. \n
                  515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
                  516. \n
                  517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
                  518. \n
                  519. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
                  520. \n
                  521. ^ Valance (2023).\n
                  522. \n
                  523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
                  524. \n
                  525. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                  526. \n
                  527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                  528. \n
                  529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
                  530. \n
                  531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
                  532. \n
                  533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
                  534. \n
                  535. ^ a b Christian (2020), pp. 67, 73.\n
                  536. \n
                  537. ^ Yudkowsky (2008).\n
                  538. \n
                  539. ^ a b Anderson & Anderson (2011).\n
                  540. \n
                  541. ^ AAAI (2014).\n
                  542. \n
                  543. ^ Wallach (2010).\n
                  544. \n
                  545. ^ Russell (2019), p. 173.\n
                  546. \n
                  547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
                  548. \n
                  549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
                  550. \n
                  551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
                  552. \n
                  553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
                  554. \n
                  555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                  556. \n
                  557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
                  558. \n
                  559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                  560. \n
                  561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                  562. \n
                  563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                  564. \n
                  565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
                  566. \n
                  567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  568. \n
                  569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  570. \n
                  571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
                  572. \n
                  573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
                  574. \n
                  575. ^ a b Vincent (2023).\n
                  576. \n
                  577. ^ Stanford University (2023).\n
                  578. \n
                  579. ^ a b c d UNESCO (2021).\n
                  580. \n
                  581. ^ Kissinger (2021).\n
                  582. \n
                  583. ^ Altman, Brockman & Sutskever (2023).\n
                  584. \n
                  585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
                  586. \n
                  587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
                  588. \n
                  589. ^ Edwards (2023).\n
                  590. \n
                  591. ^ Kasperowicz (2023).\n
                  592. \n
                  593. ^ Fox News (2023).\n
                  594. \n
                  595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
                  596. \n
                  597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
                  598. \n
                  599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
                  600. \n
                  601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
                  602. \n
                  603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
                  604. \n
                  605. ^ a b Russell & Norvig 2021, p. 9.\n
                  606. \n
                  607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
                  608. \n
                  609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                  610. \n
                  611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
                  612. \n
                  613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
                  614. \n
                  615. ^ Crevier (1993), pp. 47–49.\n
                  616. \n
                  617. ^ Russell & Norvig (2003), p. 17.\n
                  618. \n
                  619. ^ Russell & Norvig (2003), p. 18.\n
                  620. \n
                  621. ^ Newquist (1994), p. 86.\n
                  622. \n
                  623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
                  624. \n
                  625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
                  626. \n
                  627. ^ Russell & Norvig (2021), p. 21.\n
                  628. \n
                  629. ^ Lighthill (1973).\n
                  630. \n
                  631. ^ NRC 1999, pp. 212–213.\n
                  632. \n
                  633. ^ Russell & Norvig (2021), p. 22.\n
                  634. \n
                  635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
                  636. \n
                  637. ^ Russell & Norvig (2021), p. 24.\n
                  638. \n
                  639. ^ Nilsson (1998), p. 7.\n
                  640. \n
                  641. ^ McCorduck (2004), pp. 454–462.\n
                  642. \n
                  643. ^ Moravec (1988).\n
                  644. \n
                  645. ^ a b Brooks (1990).\n
                  646. \n
                  647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
                  648. \n
                  649. ^ Russell & Norvig (2021), p. 25.\n
                  650. \n
                  651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
                  652. \n
                  653. ^ Russell & Norvig (2021), p. 26.\n
                  654. \n
                  655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
                  656. \n
                  657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
                  658. \n
                  659. ^ Wong (2023).\n
                  660. \n
                  661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
                  662. \n
                  663. ^ a b c Clark (2015b).\n
                  664. \n
                  665. ^ Big data: Russell & Norvig (2021, p. 26)\n
                  666. \n
                  667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
                  668. \n
                  669. ^ DiFeliciantonio (2023).\n
                  670. \n
                  671. ^ Goswami (2023).\n
                  672. \n
                  673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                  674. \n
                  675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
                  676. \n
                  677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
                  678. \n
                  679. ^ a b Turing (1950), p. 1.\n
                  680. \n
                  681. ^ Turing (1950), Under "The Argument from Consciousness".\n
                  682. \n
                  683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
                  684. \n
                  685. ^ Russell & Norvig (2021), p. 3.\n
                  686. \n
                  687. ^ Maker (2006).\n
                  688. \n
                  689. ^ McCarthy (1999).\n
                  690. \n
                  691. ^ Minsky (1986).\n
                  692. \n
                  693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
                  694. \n
                  695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
                  696. \n
                  697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
                  698. \n
                  699. ^ Nilsson (1983), p. 10.\n
                  700. \n
                  701. ^ Haugeland (1985), pp. 112–117.\n
                  702. \n
                  703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
                  704. \n
                  705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
                  706. \n
                  707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
                  708. \n
                  709. ^ Crevier (1993), p. 125.\n
                  710. \n
                  711. ^ Langley (2011).\n
                  712. \n
                  713. ^ Katz (2012).\n
                  714. \n
                  715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
                  716. \n
                  717. ^ Pennachin & Goertzel (2007).\n
                  718. \n
                  719. ^ a b Roberts (2016).\n
                  720. \n
                  721. ^ Russell & Norvig (2021), p. 986.\n
                  722. \n
                  723. ^ Chalmers (1995).\n
                  724. \n
                  725. ^ Dennett (1991).\n
                  726. \n
                  727. ^ Horst (2005).\n
                  728. \n
                  729. ^ Searle (1999).\n
                  730. \n
                  731. ^ Searle (1980), p. 1.\n
                  732. \n
                  733. ^ Russell & Norvig (2021), p. 981.\n
                  734. \n
                  735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
                  736. \n
                  737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
                  738. \n
                  739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
                  740. \n
                  741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
                  742. \n
                  743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
                  744. \n
                  745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                  746. \n
                  747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                  748. \n
                  749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
                  750. \n
                  751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
                  752. \n
                  753. ^ Russell & Norvig (2021), p. 1005.\n
                  754. \n
                  755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
                  756. \n
                  757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
                  758. \n
                  759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
                  760. \n
                  761. ^ McCorduck (2004), pp. 340–400.\n
                  762. \n
                  763. ^ Buttazzo (2001).\n
                  764. \n
                  765. ^ Anderson (2008).\n
                  766. \n
                  767. ^ McCauley (2007).\n
                  768. \n
                  769. ^ Galvan (1997).\n
                  770. \n
                  \n

                  AI textbooks

                  \n

                  The two most widely used textbooks in 2023 (see the Open Syllabus):\n

                  \n\n

                  These were four of the most widely used AI textbooks in 2008:\n

                  \n
                  \n\n
                  \n

                  Other textbooks:\n

                  \n\n

                  History of AI

                  \n
                  \n\n
                  \n

                  Other sources

                  \n
                  \n\n\n
                  \n

                  Further reading

                  \n
                  \n\n
                  \n
                  \n\n\n\n\n\n\n\n\n\n
                  \n
                  \n\t\t\t\t\t\n\t\t\t\t
                  \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
                  \n\t
                    \n
                    \n\n\n'}'. -[2024-10-27 22:48:47,572] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DcBA03Fz8lUAbQAAZTM0WYMc', type='function')], function_call=None) -[2024-10-27 22:48:47,573] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-10-27 22:48:47,573] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-10-27 22:48:48,908] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=AI%20trends&src=typed_query'}'. -[2024-10-27 22:48:48,909] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=AI%20trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-10-27 22:48:48,910] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_hbEAeQ1r14bPnT5YMTOXepM7', type='function')], function_call=None) -[2024-10-27 22:48:48,911] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_X2WZjFmudUmMzICf1BB5MHsR', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DcBA03Fz8lUAbQAAZTM0WYMc', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_hbEAeQ1r14bPnT5YMTOXepM7', type='function')], function_call=None) - -[2024-10-27 22:48:48,912] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_X2WZjFmudUmMzICf1BB5MHsR', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DcBA03Fz8lUAbQAAZTM0WYMc', type='function')], function_call=None) -- agent4: Message(content=None, 
role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_hbEAeQ1r14bPnT5YMTOXepM7', type='function')], function_call=None) - -[2024-10-27 22:48:48,913] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_X2WZjFmudUmMzICf1BB5MHsR', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DcBA03Fz8lUAbQAAZTM0WYMc', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_hbEAeQ1r14bPnT5YMTOXepM7', type='function')], function_call=None) - -[2024-10-27 22:48:49,349] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-27 22:48:49,349] [INFO] [Engine]: Starting iteration 2 -[2024-10-27 22:48:49,350] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_X2WZjFmudUmMzICf1BB5MHsR', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DcBA03Fz8lUAbQAAZTM0WYMc', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_hbEAeQ1r14bPnT5YMTOXepM7', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. If agents are asked, they should be assigned the task of answering. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:48:50,191] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Ask agent2, agent3, and agent4 for the latest trends in AI.'}, 'continue': True} -[2024-10-27 22:48:50,191] [INFO] [Engine]: Assigned tasks: {'agent1': 'Ask agent2, agent3, and agent4 for the latest trends in AI.'} -[2024-10-27 22:48:50,191] [INFO] [Engine]: Assigning task to agent1: Ask agent2, agent3, and agent4 for the latest trends in AI. -[2024-10-27 22:48:50,192] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Ask agent2, agent3, and agent4 for the latest trends in AI.'. -[2024-10-27 22:48:51,246] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Can you provide the latest trends in AI? -[2024-10-27 22:48:51,246] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Can you provide the latest trends in AI? -[2024-10-27 22:48:51,246] [INFO] [BaseAgent]: Agent 'agent1' called 'communicate_to' with args '{'target_agent_id': 'agent2', 'message': 'Can you provide the latest trends in AI?'}'. -[2024-10-27 22:48:51,246] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully sent message to agent agent2', 'session_id': 'default_session'}'. 
-[2024-10-27 22:48:51,246] [DEBUG] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_JztVMeqFiy1BchvKcwkoDqmh', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_OZhbV5Hg6HOhdX3N9LvgyXlq', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_02Xc3raTvAJbIUvXRxGDvTP1', type='function')], function_call=None) -[2024-10-27 22:48:51,247] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_JztVMeqFiy1BchvKcwkoDqmh', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_OZhbV5Hg6HOhdX3N9LvgyXlq', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_02Xc3raTvAJbIUvXRxGDvTP1', type='function')], function_call=None) - -[2024-10-27 22:48:51,247] [INFO] [Engine]: Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), 
id='call_JztVMeqFiy1BchvKcwkoDqmh', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_OZhbV5Hg6HOhdX3N9LvgyXlq', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_02Xc3raTvAJbIUvXRxGDvTP1', type='function')], function_call=None) - -[2024-10-27 22:48:51,247] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_X2WZjFmudUmMzICf1BB5MHsR', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_DcBA03Fz8lUAbQAAZTM0WYMc', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_hbEAeQ1r14bPnT5YMTOXepM7', type='function')], function_call=None) - -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent2", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_JztVMeqFiy1BchvKcwkoDqmh', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent3", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), 
id='call_OZhbV5Hg6HOhdX3N9LvgyXlq', type='function'), ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id": "agent4", "message": "Can you provide the latest trends in AI?"}', name='communicate_to'), id='call_02Xc3raTvAJbIUvXRxGDvTP1', type='function')], function_call=None) - -[2024-10-27 22:48:51,695] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-27 22:48:51,695] [INFO] [Engine]: Maximum iterations reached. -[2024-10-27 22:48:51,695] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-27 22:48:51,696] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:48:51,696] [INFO] [Evaluator]: Total Token Consumption: 86 -[2024-10-27 22:48:51,696] [INFO] [Evaluator]: Average Tokens per Iteration: 43.0 -[2024-10-27 22:48:51,696] [INFO] [Engine]: Simulation completed. -[2024-10-27 22:49:47,364] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:49:47,364] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:49:47,364] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:49:47,364] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:49:47,364] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:49:47,364] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:49:47,365] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:49:47,365] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:49:47,365] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:49:47,365] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-10-27 22:49:47,365] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:49:47,365] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:49:47,365] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:49:47,365] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:49:47,366] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-27 22:49:47,366] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:49:47,366] [INFO] [Engine]: Engine starting simulation. -[2024-10-27 22:49:47,366] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:49:47,366] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:49:48,340] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:49:48,340] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:49:48,340] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-10-27 22:49:48,341] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-10-27 22:49:48,994] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Can you search for the latest trends in AI using Google and provide the information? -[2024-10-27 22:49:48,994] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Can you search for the latest trends in AI using Google and provide the information? -[2024-10-27 22:49:48,994] [INFO] [BaseAgent]: Agent 'agent2' called 'communicate_to' with args '{'target_agent_id': 'agent1', 'message': 'Can you search for the latest trends in AI using Google and provide the information?'}'. -[2024-10-27 22:49:48,994] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully sent message to agent agent1', 'session_id': 'default_session'}'. 
-[2024-10-27 22:49:48,995] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_Obx1HArCSur0W33N4s9mQ2vh', type='function')], function_call=None) -[2024-10-27 22:49:49,003] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:49:49,003] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-10-27 22:49:49,752] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:49:49,765] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
                    \n\t
                    \n\t\t
                    \n\t\t\t
                    \n\n\t\t\n\t\t\t\n\n\n\t\t
                    \n\t\t
                    \n\t\t\t\n\n\n\t\t\t\n\n\t\t
                    \n\t\n\n
                    \n\t
                    \n\t\t
                    \n\t\t\t
                    \n\t\t
                    \n\t\t
                    \n\t\t\t
                    \n\t\t
                    \n\t\t\t\n\t\t
                    \n\t
                    \n\t
                    \n\t\t\t\t
                    \n\t\t\n\t\t\t
                    \n\t\t
                    \n\t\t
                    \n\t\t\t
                    \n\t\t\t\t
                    \n\t\t\t\t\t\n\t\t\t\t\t

                    Artificial intelligence

                    \n\t\t\t\t\t\t\t\n
                    \n\t\n\t\n\t
                    \n\n\t\t
                    \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
                    \n\n\t
                    \n
                    \n
                    \n\t\t\t\t
                    \n\t\t\t\t\t
                    \n\t\t\t\t\t\t
                    \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                    \n\t\t\t\t\t\t
                    \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
                    \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                    \n\t\t\t\t\t
                    \n\t\t\t\t
                    \n\t\t\t\t
                    \n\t\t\t\t\t
                    \n\t\t\t\t\t\t\n\t\t\t\t\t\t
                    \n\t\t\n\t\t\t\t\t
                    \n\t\t\t\t
                    \n\t\t\t\t
                    \n\t\t\t\t\t
                    \n\t\t\t\t\t\t\t
                    \n\t\t
                    Page semi-protected
                    \n\t\t
                    \n\n\t\t\t\t\t\t
                    From Wikipedia, the free encyclopedia
                    \n\t\t\t\t\t
                    \n\t\t\t\t\t
                    \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
                    \n\n

                    \n

                    \n\n\n\n\n\n\n\n

                    Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

                    Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

                    The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

                    Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

                    \n\n

                    Goals

                    \n

                    The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

                    \n

                    Reasoning and problem-solving

                    \n

                    Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

                    Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

                    \n

                    Knowledge representation

                    \n
                    An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
                    \n

                    Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

                    A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

                    Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

                    \n

                    Planning and decision-making

                    \n

                    An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

                    In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

                    In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

                    A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

                    Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

                    \n

                    Learning

                    \n

                    Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

                    There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

                    In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

                    Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

                    \n
                    \n

                    Natural language processing

                    \n

                    Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

                    Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

                    Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

                    \n

                    Perception

                    \n

                    Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

                    The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61] object tracking,[62] and robotic perception.[63]\n

                    \n

                    Social intelligence

                    \n
                    Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
                    \n

                    Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

                    However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

                    \n

                    General intelligence

                    \n

                    A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

                    \n

                    Techniques

                    \n

                    AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

                    \n

                    Search and optimization

                    \n

                    AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

                    \n
                    \n

                    State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

                    Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

                    Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

                    \n
                    \n
                    Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

                    Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

                    Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

                    Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

                    Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

                    \n

                    Logic

                    \n

                    Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

                    Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

                    Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

                    Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

                    Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

                    Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

                    \n

                    Probabilistic methods for uncertain reasoning

                    \n
                    A simple Bayesian network, with the associated conditional probability tables
                    \n

                    Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

                    Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

                    Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

                    \n
                    Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
                    \n

                    Classifiers and statistical learning methods

                    \n

                    The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

                    There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

                    \n

                    Artificial neural networks

                    \n
                    A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
                    \n

                    An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

                    Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

                    In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

                    \n
                    \n

                    Deep learning

                    \n
                    \n

                    Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

                    Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

                    \n

                    GPT

                    \n

                    Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

                    Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

                    \n

                    Hardware and software

                    \n\n

                    In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

                    The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

                    \n

                    Applications

                    \n

                    AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

                    Health and medicine

                    \n\n

                    The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

                    For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

                    \n

                    Games

                    \n\n

                    Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

                    \n

                    Mathematics

                    \n

                    In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

                    Alternatively, dedicated models for mathematical problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

                    When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematical tasks.\n

                    Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

                    \n

                    Finance

                    \n

                    Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

                    World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

                    \n

                    Military

                    \n\n

                    Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

                    In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

                    \n

                    Generative AI

                    \n\n
                    Vincent van Gogh in watercolour created by generative AI software
                    \n

                    In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

                    In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

                    \n

                    Agents

                    \n

                    Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

                    \n

                    Other industry-specific tasks

                    \n

                    There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

                    AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

                    In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

                    Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

                    \n

                    Ethics

                    \n\n

                    AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

                    \n

                    Risks and harm

                    \n
                    \n\n

                    Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

                    AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

                    Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

                    AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

                    Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

                    \n

                    Dominance by tech giants

                    \n

                    The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

                    \n

                    Substantial power needs and other environmental impacts

                    \n\n

                    In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

                    Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

                    A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

                    In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

                    In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – of energy will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

                    \n

                    Misinformation

                    \n\n

                    YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

                    In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

                    \n

                    Algorithmic bias and fairness

                    \n\n

                    Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

                    On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

                    COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

                    A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

                    Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

                    Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

                    There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

                    At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

                    \n

                    Lack of transparency

                    \n\n

                    Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] This is particularly true of deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs; however, some popular explainability techniques exist.[221]\n

                    It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

                    People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

                    DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

                    Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

                    \n

                    Bad actors and weaponized AI

                    \n\n

                    Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

                    A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

                    AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

                    There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

                    \n

                    Technological unemployment

                    \n\n

                    Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

                    In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

                    Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

                    From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

                    \n

                    Existential risk

                    \n\n

                    It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

                    First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

                    Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

                    The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

                    In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

                    In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

                    Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

                    \n

                    Ethical machines and alignment

                    \n\n

                    Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

                    Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

                    Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

                    \n

                    Open source

                    \n

                    Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

                    \n

                    Frameworks

                    \n

                    Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework—containing the SUM values and developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

                    \n
                    • Respect the dignity of individual people
                    • \n
                    • Connect with other people sincerely, openly, and inclusively
                    • \n
                    • Care for the wellbeing of everyone
                    • \n
                    • Protect social values, justice, and the public interest
                    \n

                    Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles are not without criticism, especially with regard to the people chosen to contribute to these frameworks.[284]\n

                    Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

                    The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

                    \n

                    Regulation

                    \n\n
                    AI Safety Summit
                    The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
                    \n

                    The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

                    In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

                    In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

                    \n

                    History

                    \n\n\n

                    The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

                    The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

                    Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

                    In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

                    Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lotfi Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

                    AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

                    Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

                    In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

                    In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

                    \n

                    Philosophy

                    \n

                    Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

                    Defining artificial intelligence

                    \n\n

                    Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

                    \n
                    The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
                    \n

                    Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

                    McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

                    Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

                    Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

                    \n

                    Evaluating approaches to AI

                    \n

                    No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

                    \n

                    Symbolic AI and its limits

                    \n

                    Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

                    However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

                    The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

                    \n

                    Neat vs. scruffy

                    \n\n

                    "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

                    \n

                    Soft vs. hard computing

                    \n\n

                    Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

                    \n

                    Narrow vs. general AI

                    \n\n

                    AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

                    \n

                    Machine consciousness, sentience, and mind

                    \n\n

                    The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

                    \n

                    Consciousness

                    \n\n

                    David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

                    \n

                    Computationalism and functionalism

                    \n\n

                    Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

                    Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

                    \n

                    AI welfare and rights

                    \n

                    It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

                    In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[375][376]\n

                    Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

                    \n

                    Future

                    \n

                    Superintelligence and the singularity

                    \n

                    A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

                    However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

                    \n

                    Transhumanism

                    \n\n

                    Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

                    Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

                    \n

                    In fiction

                    \n\n
                    The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
                    \n

                    Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

                    A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

                    Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

                    Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

                    \n

                    See also

                    \n\n

                    Explanatory notes

                    \n
                    \n
                      \n
                    1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                    2. \n
                    3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                    4. \n
                    5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
                    6. \n
                    7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
                    8. \n
                    9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
                    10. \n
                    11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
                    12. \n
                    13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
                    14. \n
                    15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
                    16. \n
                    17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
                    18. \n
                    19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
                    20. \n
                    21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
                    22. \n
                    23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
                    24. \n
                    25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[218]\n
                    26. \n
                    27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
                    28. \n
                    29. ^ This is the United Nations\' definition, and includes things like land mines as well.[232]\n
                    30. \n
                    31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
                    32. \n
                    33. ^ Sometimes called a "robopocalypse"[251]\n
                    34. \n
                    35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
                    36. \n
                    37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
                    38. \n
                    39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
                    40. \n
                    41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
                    42. \n
                    43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
                    44. \n
                    45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
                    46. \n
                    47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
                    48. \n
                    49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
                    50. \n
                    51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
                    52. \n
                    53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
                    54. \n
                    55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
                    56. \n
                    57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
                    58. \n
                    \n

                    References

                    \n
                    \n
                      \n
                    1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
                    2. \n
                    3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
                    4. \n
                    5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
                    6. \n
                    7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
                      Proposal for the modern version: Pennachin & Goertzel (2007)
                      Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
                      \n
                    8. \n
                    9. ^ Russell & Norvig (2021, §1.2).\n
                    10. \n
                    11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
                      The proposal: McCarthy et al. (1955)
                      \n
                    12. \n
                    13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
                    14. \n
                    15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
                    16. \n
                    17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
                    18. \n
                    19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
                    20. \n
                    21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
                    22. \n
                    23. ^ Toews (2023).\n
                    24. \n
                    25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
                    26. \n
                    27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
                    28. \n
                    29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
                    30. \n
                    31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
                    32. \n
                    33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
                    34. \n
                    35. ^ Smoliar & Zhang (1994).\n
                    36. \n
                    37. ^ Neumann & Möller (2008).\n
                    38. \n
                    39. ^ Kuperman, Reichley & Bailey (2006).\n
                    40. \n
                    41. ^ McGarry (2005).\n
                    42. \n
                    43. ^ Bertini, Del Bimbo & Torniai (2006).\n
                    44. \n
                    45. ^ Russell & Norvig (2021), pp. 272.\n
                    46. \n
                    47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
                    48. \n
                    49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
                    50. \n
                    51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
                    52. \n
                    53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
                    54. \n
                    55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
                    56. \n
                    57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
                    58. \n
                    59. ^ Newquist (1994), p. 296.\n
                    60. \n
                    61. ^ Crevier (1993), pp. 204–208.\n
                    62. \n
                    63. ^ Russell & Norvig (2021), p. 528.\n
                    64. \n
                    65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
                    66. \n
                    67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
                    68. \n
                    69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
                    70. \n
                    71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
                    72. \n
                    73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
                    74. \n
                    75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
                    76. \n
                    77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
                    78. \n
                    79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
                    80. \n
                    81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
                    82. \n
                    83. ^ Turing (1950).\n
                    84. \n
                    85. ^ Solomonoff (1956).\n
                    86. \n
                    87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
                    88. \n
                    89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
                    90. \n
                    91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
                    92. \n
                    93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
                    94. \n
                    95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
                    96. \n
                    97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
                    98. \n
                    99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
                    100. \n
                    101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
                    102. \n
                    103. ^ Russell & Norvig (2021), pp. 856–858.\n
                    104. \n
                    105. ^ Dickson (2022).\n
                    106. \n
                    107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
                    108. \n
                    109. ^ Vincent (2019).\n
                    110. \n
                    111. ^ Russell & Norvig (2021), pp. 875–878.\n
                    112. \n
                    113. ^ Bushwick (2023).\n
                    114. \n
                    115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
                    116. \n
                    117. ^ Russell & Norvig (2021), pp. 849–850.\n
                    118. \n
                    119. ^ Russell & Norvig (2021), pp. 895–899.\n
                    120. \n
                    121. ^ Russell & Norvig (2021), pp. 899–901.\n
                    122. \n
                    123. ^ Challa et al. (2011).\n
                    124. \n
                    125. ^ Russell & Norvig (2021), pp. 931–938.\n
                    126. \n
                    127. ^ MIT AIL (2014).\n
                    128. \n
                    129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
                    130. \n
                    131. ^ Waddell (2018).\n
                    132. \n
                    133. ^ Poria et al. (2017).\n
                    134. \n
                    135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
                    136. \n
                    137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
                    138. \n
                    139. ^ Russell & Norvig (2021), sect. 11.2.\n
                    140. \n
                    141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
                    142. \n
                    143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
                    144. \n
                    145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
                    146. \n
                    147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
                    148. \n
                    149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
                    150. \n
                    151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
                    152. \n
                    153. ^ Merkle & Middendorf (2013).\n
                    154. \n
                    155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
                    156. \n
                    157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
                    158. \n
                    159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
                    160. \n
                    161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
                    162. \n
                    163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
                    164. \n
                    165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
                    166. \n
                    167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
                    168. \n
                    169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
                    170. \n
                    171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
                    172. \n
                    173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
                    174. \n
                    175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
                    176. \n
                    177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
                    178. \n
                    179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
                    180. \n
                    181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
                    182. \n
                    183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
                    184. \n
                    185. ^ Domingos (2015), chpt. 6.\n
                    186. \n
                    187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
                    188. \n
                    189. ^ Domingos (2015), p. 210.\n
                    190. \n
                    191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
                    192. \n
                    193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
                    194. \n
                    195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
                    196. \n
                    197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
                    198. \n
                    199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
                    200. \n
                    201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
                    202. \n
                    203. ^ Domingos (2015), p. 152.\n
                    204. \n
                    205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
                    206. \n
                    207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
                    208. \n
                    209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
                    210. \n
                    211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
                    212. \n
                    213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
                    214. \n
                    215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
                    216. \n
                    217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
                    218. \n
                    219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
                    220. \n
                    221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
                    222. \n
                    223. ^ Deng & Yu (2014), pp. 199–200.\n
                    224. \n
                    225. ^ Ciresan, Meier & Schmidhuber (2012).\n
                    226. \n
                    227. ^ Russell & Norvig (2021), p. 751.\n
                    228. \n
                    229. ^ a b c Russell & Norvig (2021), p. 17.\n
                    230. \n
                    231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
                    232. \n
                    233. ^ a b Schmidhuber (2022), sect. 5.\n
                    234. \n
                    235. ^ Schmidhuber (2022), sect. 6.\n
                    236. \n
                    237. ^ a b c Schmidhuber (2022), sect. 7.\n
                    238. \n
                    239. ^ Schmidhuber (2022), sect. 8.\n
                    240. \n
                    241. ^ Quoted in Christian (2020, p. 22)\n
                    242. \n
                    243. ^ Smith (2023).\n
                    244. \n
                    245. ^ "Explained: Generative AI". 9 November 2023.\n
                    246. \n
                    247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
                    248. \n
                    249. ^ Marmouyet (2023).\n
                    250. \n
                    251. ^ Kobielus (2019).\n
                    252. \n
                    253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
                    254. \n
                    255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
                    256. \n
                    257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see 'gigantic' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                    258. \n
                    259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
                    260. \n
                    261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
                    262. \n
                    263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
                    264. \n
                    265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
                    266. \n
                    267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
                    268. \n
                    269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
                    270. \n
                    271. ^ "AI speeds up drug design for Parkinson's ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    272. \n
                    273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
                    274. \n
                    275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
                    276. \n
                    277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
                    278. \n
                    279. ^ Markoff, John (16 February 2011). "Computer Wins on 'Jeopardy!': Trivial, It's Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
                    280. \n
                    281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
                    282. \n
                    283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
                    284. \n
                    285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
                    286. \n
                    287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in 'fiendishly complex' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
                    288. \n
                    289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
                    290. \n
                    291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
                    292. \n
                    293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
                    294. \n
                    295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
                    296. \n
                    297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
                    298. \n
                    299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
                    300. \n
                    301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
                    302. \n
                    303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
                    304. \n
                    305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
                    306. \n
                    307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
                    308. \n
                    309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
                    310. \n
                    311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
                    312. \n
                    313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
                    314. \n
                    315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
                    316. \n
                    317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
                    318. \n
                    319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
                    320. \n
                    321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
                    322. \n
                    323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
                    324. \n
                    325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can't – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
                    326. \n
                    327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
                    328. \n
                    329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    330. \n
                    331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
                    332. \n
                    333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
                    334. \n
                    335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
                    336. \n
                    337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
                    338. \n
                    339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    340. \n
                    341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
                    342. \n
                    343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    344. \n
                    345. ^ Simonite (2016).\n
                    346. \n
                    347. ^ Russell & Norvig (2021), p. 987.\n
                    348. \n
                    349. ^ Laskowski (2023).\n
                    350. \n
                    351. ^ GAO (2022).\n
                    352. \n
                    353. ^ Valinsky (2019).\n
                    354. \n
                    355. ^ Russell & Norvig (2021), p. 991.\n
                    356. \n
                    357. ^ Russell & Norvig (2021), pp. 991–992.\n
                    358. \n
                    359. ^ Christian (2020), p. 63.\n
                    360. \n
                    361. ^ Vincent (2022).\n
                    362. \n
                    363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
                    364. \n
                    365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
                    366. \n
                    367. ^ Reisner (2023).\n
                    368. \n
                    369. ^ Alter & Harris (2023).\n
                    370. \n
                    371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
                    372. \n
                    373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
                    374. \n
                    375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
                    376. \n
                    377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
                    378. \n
                    379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
                    380. \n
                    381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech's Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
                    382. \n
                    383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
                    384. \n
                    385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It's only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
                    386. \n
                    387. ^ Halper, Evan; O'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
                    388. \n
                    389. ^ Davenport, Carly. "AI Data Centers and the Coming YS Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
                    390. \n
                    391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
                    392. \n
                    393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    394. \n
                    395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
                    396. \n
                    397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island's Nuclear Plant to Reopen, Help Power Microsoft's AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    398. \n
                    399. ^ Nicas (2018).\n
                    400. \n
                    401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
                    402. \n
                    403. ^ Williams (2023).\n
                    404. \n
                    405. ^ Taylor & Hern (2023).\n
                    406. \n
                    407. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
                    408. \n
                    409. ^ a b Rose (2023).\n
                    410. \n
                    411. ^ CNA (2019).\n
                    412. \n
                    413. ^ Goffrey (2008), p. 17.\n
                    414. \n
                    415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
                    416. \n
                    417. ^ Christian (2020), p. 25.\n
                    418. \n
                    419. ^ a b Russell & Norvig (2021), p. 995.\n
                    420. \n
                    421. ^ Grant & Hill (2023).\n
                    422. \n
                    423. ^ Larson & Angwin (2016).\n
                    424. \n
                    425. ^ Christian (2020), p. 67–70.\n
                    426. \n
                    427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
                    428. \n
                    429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
                    430. \n
                    431. ^ Quoted in Christian (2020, p. 65).\n
                    432. \n
                    433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
                    434. \n
                    435. ^ Quoted in Christian (2020, p. 80)\n
                    436. \n
                    437. ^ Dockrill (2022).\n
                    438. \n
                    439. ^ Sample (2017).\n
                    440. \n
                    441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
                    442. \n
                    443. ^ Christian (2020), p. 110.\n
                    444. \n
                    445. ^ Christian (2020), pp. 88–91.\n
                    446. \n
                    447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
                    448. \n
                    449. ^ Christian (2020), p. 91.\n
                    450. \n
                    451. ^ Christian (2020), p. 83.\n
                    452. \n
                    453. ^ Verma (2021).\n
                    454. \n
                    455. ^ Rothman (2020).\n
                    456. \n
                    457. ^ Christian (2020), pp. 105–108.\n
                    458. \n
                    459. ^ Christian (2020), pp. 108–112.\n
                    460. \n
                    461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
                    462. \n
                    463. ^ Russell & Norvig (2021), p. 989.\n
                    464. \n
                    465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
                    466. \n
                    467. ^ Russell & Norvig (2021), p. 988.\n
                    468. \n
                    469. ^ Robitzski (2018); Sainato (2015)\n
                    470. \n
                    471. ^ Harari (2018).\n
                    472. \n
                    473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
                    474. \n
                    475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
                    476. \n
                    477. ^ Urbina et al. (2022).\n
                    478. \n
                    479. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
                    480. \n
                    481. ^ Ford & Colvin (2015);McGaughey (2022)\n
                    482. \n
                    483. ^ IGM Chicago (2017).\n
                    484. \n
                    485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
                    486. \n
                    487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
                    488. \n
                    489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
                    490. \n
                    491. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
                    492. \n
                    493. ^ Morgenstern (2015).\n
                    494. \n
                    495. ^ Mahdawi (2017); Thompson (2014)\n
                    496. \n
                    497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
                    498. \n
                    499. ^ Cellan-Jones (2014).\n
                    500. \n
                    501. ^ Russell & Norvig 2021, p. 1001.\n
                    502. \n
                    503. ^ Bostrom (2014).\n
                    504. \n
                    505. ^ Russell (2019).\n
                    506. \n
                    507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
                    508. \n
                    509. ^ Harari (2023).\n
                    510. \n
                    511. ^ Müller & Bostrom (2014).\n
                    512. \n
                    513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
                    514. \n
                    515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
                    516. \n
                    517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
                    518. \n
                    519. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
                    520. \n
                    521. ^ Valance (2023).\n
                    522. \n
                    523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
                    524. \n
                    525. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                    526. \n
                    527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                    528. \n
                    529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
                    530. \n
                    531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
                    532. \n
                    533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
                    534. \n
                    535. ^ a b Christian (2020), pp. 67, 73.\n
                    536. \n
                    537. ^ Yudkowsky (2008).\n
                    538. \n
                    539. ^ a b Anderson & Anderson (2011).\n
                    540. \n
                    541. ^ AAAI (2014).\n
                    542. \n
                    543. ^ Wallach (2010).\n
                    544. \n
                    545. ^ Russell (2019), p. 173.\n
                    546. \n
                    547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
                    548. \n
                    549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
                    550. \n
                    551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
                    552. \n
                    553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
                    554. \n
                    555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                    556. \n
                    557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
                    558. \n
                    559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                    560. \n
                    561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                    562. \n
                    563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                    564. \n
                    565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
                    566. \n
                    567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    568. \n
                    569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    570. \n
                    571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
                    572. \n
                    573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
                    574. \n\n
                    575. ^ a b Vincent (2023).\n
                    576. \n
                    577. ^ Stanford University (2023).\n
                    578. \n
                    579. ^ a b c d UNESCO (2021).\n
                    580. \n
                    581. ^ Kissinger (2021).\n
                    582. \n
                    583. ^ Altman, Brockman & Sutskever (2023).\n
                    584. \n
                    585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
                    586. \n
                    587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
                    588. \n
                    589. ^ Edwards (2023).\n
                    590. \n
                    591. ^ Kasperowicz (2023).\n
                    592. \n
                    593. ^ Fox News (2023).\n
                    594. \n
                    595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
                    596. \n
                    597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
                    598. \n
                    599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
                    600. \n
                    601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
                    602. \n
                    603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
                    604. \n
                    605. ^ a b Russell & Norvig 2021, p. 9.\n
                    606. \n
                    607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
                    608. \n
                    609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                    610. \n
                    611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
                    612. \n
                    613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
                    614. \n
                    615. ^ Crevier (1993), pp. 47–49.\n
                    616. \n
                    617. ^ Russell & Norvig (2003), p. 17.\n
                    618. \n
                    619. ^ Russell & Norvig (2003), p. 18.\n
                    620. \n
                    621. ^ Newquist (1994), pp. 86–86.\n
                    622. \n
                    623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
                    624. \n
                    625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
                    626. \n
                    627. ^ Russell & Norvig (2021), p. 21.\n
                    628. \n
                    629. ^ Lighthill (1973).\n
                    630. \n
                    631. ^ NRC 1999, pp. 212–213.\n
                    632. \n
                    633. ^ Russell & Norvig (2021), p. 22.\n
                    634. \n
                    635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
                    636. \n
                    637. ^ Russell & Norvig (2021), p. 24.\n
                    638. \n
                    639. ^ Nilsson (1998), p. 7.\n
                    640. \n
                    641. ^ McCorduck (2004), pp. 454–462.\n
                    642. \n
                    643. ^ Moravec (1988).\n
                    644. \n
                    645. ^ a b Brooks (1990).\n
                    646. \n
                    647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
                    648. \n
                    649. ^ Russell & Norvig (2021), p. 25.\n
                    650. \n
                    651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
                    652. \n
                    653. ^ Russell & Norvig (2021), p. 26.\n
                    654. \n
                    655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
                    656. \n
                    657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
                    658. \n
                    659. ^ Wong (2023).\n
                    660. \n
                    661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
                    662. \n
                    663. ^ a b c Clark (2015b).\n
                    664. \n
                    665. ^ Big data: Russell & Norvig (2021, p. 26)\n
                    666. \n
                    667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
                    668. \n
                    669. ^ DiFeliciantonio (2023).\n
                    670. \n
                    671. ^ Goswami (2023).\n
                    672. \n
                    673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                    674. \n
                    675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
                    676. \n
                    677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
                    678. \n
                    679. ^ a b Turing (1950), p. 1.\n
                    680. \n
                    681. ^ Turing (1950), Under "The Argument from Consciousness".\n
                    682. \n
                    683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
                    684. \n
                    685. ^ Russell & Norvig (2021), p. 3.\n
                    686. \n
                    687. ^ Maker (2006).\n
                    688. \n
                    689. ^ McCarthy (1999).\n
                    690. \n
                    691. ^ Minsky (1986).\n
                    692. \n
                    693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
                    694. \n
                    695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
                    696. \n
                    697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
                    698. \n
                    699. ^ Nilsson (1983), p. 10.\n
                    700. \n
                    701. ^ Haugeland (1985), pp. 112–117.\n
                    702. \n
                    703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
                    704. \n
                    705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
                    706. \n
                    707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
                    708. \n
                    709. ^ Crevier (1993), p. 125.\n
                    710. \n
                    711. ^ Langley (2011).\n
                    712. \n
                    713. ^ Katz (2012).\n
                    714. \n
                    715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
                    716. \n
                    717. ^ Pennachin & Goertzel (2007).\n
                    718. \n
                    719. ^ a b Roberts (2016).\n
                    720. \n
                    721. ^ Russell & Norvig (2021), p. 986.\n
                    722. \n
                    723. ^ Chalmers (1995).\n
                    724. \n
                    725. ^ Dennett (1991).\n
                    726. \n
                    727. ^ Horst (2005).\n
                    728. \n
                    729. ^ Searle (1999).\n
                    730. \n
                    731. ^ Searle (1980), p. 1.\n
                    732. \n
                    733. ^ Russell & Norvig (2021), p. 9817.\n
                    734. \n
                    735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
                    736. \n
                    737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
                    738. \n
                    739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
                    740. \n
                    741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
                    742. \n
                    743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
                    744. \n
                    745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                    746. \n
                    747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                    748. \n
                    749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
                    750. \n
                    751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
                    752. \n
                    753. ^ Russell & Norvig (2021), p. 1005.\n
                    754. \n
                    755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
                    756. \n
                    757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
                    758. \n
                    759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
                    760. \n
                    761. ^ McCorduck (2004), pp. 340–400.\n
                    762. \n
                    763. ^ Buttazzo (2001).\n
                    764. \n
                    765. ^ Anderson (2008).\n
                    766. \n
                    767. ^ McCauley (2007).\n
                    768. \n
                    769. ^ Galvan (1997).\n
                    770. \n
                    \n

                    AI textbooks

                    \n

                    The two most widely used textbooks in 2023 (see the Open Syllabus):\n

                    \n\n

                    These were the four of the most widely used AI textbooks in 2008:\n

                    \n
                    \n\n
                    \n

                    Other textbooks:\n

                    \n\n

                    History of AI

                    \n
                    \n\n
                    \n

                    Other sources

                    \n
                    \n\n\n
                    \n

                    Further reading

                    \n
                    \n\n
                    \n
                    \n\n\n\n\n\n\n\n\n\n
                    \n
                    \n\t\t\t\t\t\n\t\t\t\t
                    \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
                    \n\t
                      \n
                      \n\n\n'}'. -[2024-10-27 22:49:49,946] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -[2024-10-27 22:49:49,946] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-10-27 22:49:49,946] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-10-27 22:49:51,441] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=AI%20trends&src=typed_query'}'. -[2024-10-27 22:49:51,443] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=AI%20trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-10-27 22:49:51,444] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_yUYDAYhSA7vicJ03oRkCKNnd', type='function')], function_call=None) -[2024-10-27 22:49:51,445] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_Obx1HArCSur0W33N4s9mQ2vh', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_yUYDAYhSA7vicJ03oRkCKNnd', type='function')], function_call=None) - -[2024-10-27 22:49:51,446] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_Obx1HArCSur0W33N4s9mQ2vh', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', 
name='fetch_webpage'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_yUYDAYhSA7vicJ03oRkCKNnd', type='function')], function_call=None) - -[2024-10-27 22:49:51,446] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Can you search for the latest trends in AI using Google and provide the information?"}', name='communicate_to'), id='call_Obx1HArCSur0W33N4s9mQ2vh', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_yUYDAYhSA7vicJ03oRkCKNnd', type='function')], function_call=None) - -[2024-10-27 22:49:51,859] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-10-27 22:49:51,859] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-10-27 22:49:51,859] [INFO] [Engine]: Engine simulation loop completed. 
-[2024-10-27 22:49:51,859] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:49:51,859] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-10-27 22:49:51,859] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-10-27 22:49:51,859] [INFO] [Engine]: Simulation completed. -[2024-10-27 22:50:24,734] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:50:24,735] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:50:24,735] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:50:24,735] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:50:24,735] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:50:24,735] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:50:24,735] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:50:24,736] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:50:24,736] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:50:24,736] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-27 22:50:24,736] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:50:24,736] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:50:24,736] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:50:24,736] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:50:24,736] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-27 22:50:24,736] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:50:24,737] [INFO] [Engine]: Engine starting simulation. 
-[2024-10-27 22:50:24,737] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:50:24,737] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. 
- -[2024-10-27 22:50:25,648] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:50:25,648] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:50:25,648] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-10-27 22:50:25,648] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-10-27 22:50:29,551] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-10-27 22:50:29,555] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

                      Accessibility Links

                      About 526,000,000 results (0.35 seconds) 

                      Ads

                      Sponsored
                      View the report for AI use cases, AI predictions, tips for overcoming the risks, and more. AI is making software a collaborator in the workplace. Learn more in the 2024 AI Outlook.

                      Ads

                      Sponsored
                      Elevate AI decision-making and innovation in your organization. Discover AI trends and strategies for business success. Mitigate Risk. Energy Efficient Solution. Capitalize on AI.
                      Sponsored
                      The AI market is quickly increasing due to the rapid pace of technological change. See how we are helping executives understand how potential disruptors will impact growth.
                      Sponsored
                      Learn how organizations are measuring value from Generative AI and navigating challenges. Q3 research shows how businesses go from investing in potential to measuring...
                      Google apps
                      '}'. -[2024-10-27 22:50:29,615] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_2slixY8EDJc9BeSxtIE7cYNE', type='function')], function_call=None) -[2024-10-27 22:50:29,616] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:50:29,616] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-10-27 22:50:31,821] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:50:31,829] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
                      \n\t
                      \n\t\t
                      \n\t\t\t
                      \n\n\t\t\n\t\t\t\n\n\n\t\t
                      \n\t\t
                      \n\t\t\t\n\n\n\t\t\t\n\n\t\t
                      \n\t\n\n
                      \n\t
                      \n\t\t
                      \n\t\t\t
                      \n\t\t
                      \n\t\t
                      \n\t\t\t
                      \n\t\t
                      \n\t\t\t\n\t\t
                      \n\t
                      \n\t
                      \n\t\t\t\t
                      \n\t\t\n\t\t\t
                      \n\t\t
                      \n\t\t
                      \n\t\t\t
                      \n\t\t\t\t
                      \n\t\t\t\t\t\n\t\t\t\t\t

                      Artificial intelligence

                      \n\t\t\t\t\t\t\t\n
                      \n\t\n\t\n\t
                      \n\n\t\t
                      \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
                      \n\n\t
                      \n
                      \n
                      \n\t\t\t\t
                      \n\t\t\t\t\t
                      \n\t\t\t\t\t\t
                      \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                      \n\t\t\t\t\t\t
                      \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
                      \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                      \n\t\t\t\t\t
                      \n\t\t\t\t
                      \n\t\t\t\t
                      \n\t\t\t\t\t
                      \n\t\t\t\t\t\t\n\t\t\t\t\t\t
                      \n\t\t\n\t\t\t\t\t
                      \n\t\t\t\t
                      \n\t\t\t\t
                      \n\t\t\t\t\t
                      \n\t\t\t\t\t\t\t
                      \n\t\t
                      Page semi-protected
                      \n\t\t
                      \n\n\t\t\t\t\t\t
                      From Wikipedia, the free encyclopedia
                      \n\t\t\t\t\t
                      \n\t\t\t\t\t
                      \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
                      \n\n

                      \n

                      \n\n\n\n\n\n\n\n

                      Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

                      Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

                      The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

                      Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

                      \n\n

                      Goals

                      \n

                      The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

                      \n

                      Reasoning and problem-solving

                      \n

                      Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

                      Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

                      \n

                      Knowledge representation

                      \n
                      An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
                      \n

                      Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

                      A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

                      Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

                      \n

                      Planning and decision-making

                      \n

                      An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

                      In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

                      In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

                      A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

                      Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

                      \n

                      Learning

                      \n

                      Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

                      There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

                      In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

                      Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

                      \n
                      \n

                      Natural language processing

                      \n

                      Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

                      Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

                      Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

                      \n

                      Perception

                      \n

                      Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

                      The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

                      \n

                      Social intelligence

                      \n
                      Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
                      \n

                      Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

                      However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

                      \n

                      General intelligence

                      \n

                      A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

                      \n

                      Techniques

                      \n

                      AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

                      \n

                      Search and optimization

                      \n

                      AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

                      \n
                      \n

                      State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

                      Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

                      Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

                      \n
                      \n
                      Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

                      Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

                      Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

                      Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

                      Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

                      \n

                      Logic

                      \n

                      Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

                      Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

                      Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

                      Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

                      Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

                      Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

                      \n

                      Probabilistic methods for uncertain reasoning

                      \n
                      A simple Bayesian network, with the associated conditional probability tables
                      \n

                      Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

                      Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

                      Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

                      \n
                      Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
                      \n

                      Classifiers and statistical learning methods

                      \n

                      The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

                      There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

                      \n

                      Artificial neural networks

                      \n
                      A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
                      \n

                      An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

                      Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

                      In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

                      \n
                      \n

                      Deep learning

                      \n
                      \n

                      Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

                      Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

                      \n

                      GPT

                      \n

                      Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

                      Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

                      \n

                      Hardware and software

                      \n\n

                      In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing units (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

                      The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

                      \n

                      Applications

                      \n

                      AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

                      Health and medicine

                      \n\n

                      The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

                      For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

                      \n

                      Games

                      \n\n

                      Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

                      \n

                      Mathematics

                      \n

                      In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

                      Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from eleuther[150] or Julius.[151]\n

                      When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

                      Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

                      \n

                      Finance

                      \n

                      Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

                      World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

                      \n

                      Military

                      \n\n

                      Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

                      In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

                      \n

                      Generative AI

                      \n\n
                      Vincent van Gogh in watercolour created by generative AI software
                      \n

                      In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

                      In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

                      \n

                      Agents

                      \n

                      Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

                      \n

                      Other industry-specific tasks

                      \n

                      There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

                      AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

                      In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

                      Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

                      \n

                      Ethics

                      \n\n

                      AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

                      \n

                      Risks and harm

                      \n
                      \n\n

                      Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

                      AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

                      Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

                      AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

                      Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

                      \n

                      Dominance by tech giants

                      \n

                      The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

                      \n

                      Substantial power needs and other environmental impacts

                      \n\n

                      In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

                      Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

                      A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

                      In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

                      In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

                      \n

                      Misinformation

                      \n\n

                      YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem.[citation needed]\n

                      In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

                      \n

                      Algorithmic bias and fairness

                      \n\n

                      Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

                      On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

                      COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

                      A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

                      Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

                      Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

                      There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

                      At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

                      \n

                      Lack of transparency

                      \n\n

                      Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] This is particularly true of deep neural networks, in which there is a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[221]\n

                      It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

                      People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

                      DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

                      Several approaches aim to address the transparency problem. SHAP makes it possible to visualise the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

                      \n

                      Bad actors and weaponized AI

                      \n\n

                      Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

                      A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

                      AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

                      There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

                      \n

                      Technological unemployment

                      \n\n

                      Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

                      In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

                      Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

                      From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

                      \n

                      Existential risk

                      \n\n

                      It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

                      First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

                      Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

                      The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

                      In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

                      In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

                      Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

                      \n

                      Ethical machines and alignment

                      \n\n

                      Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

                      Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

                      Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

                      \n

                      Open source

                      \n

                      Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

                      \n

                      Frameworks

                      \n

                      Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

                      \n
                      • Respect the dignity of individual people
                      • \n
                      • Connect with other people sincerely, openly, and inclusively
                      • \n
                      • Care for the wellbeing of everyone
                      • \n
                      • Protect social values, justice, and the public interest
                      \n

                      Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[284]\n

                      Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

                      The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

                      \n

                      Regulation

                      \n\n
                      AI Safety Summit
                      The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
                      \n

                      The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, governments officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

                      In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

                      In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

                      \n

                      History

                      \n\n\n

                      The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCullouch and Pitts design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

                      The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

                      Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

                      In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

                      Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lofti Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

                      AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

                      Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

                      In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

                      In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

                      \n

                      Philosophy

                      \n

                      Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

                      Defining artificial intelligence

                      \n\n

                      Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

                      \n
                      The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
                      \n

                      Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

                      McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

                      Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

                      Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

                      \n

                      Evaluating approaches to AI

                      \n

                      No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

                      \n

                      Symbolic AI and its limits

                      \n

                      Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

                      However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

                      The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

                      \n

                      Neat vs. scruffy

                      \n\n

                      "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

                      \n

                      Soft vs. hard computing

                      \n\n

                      Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

                      \n

                      Narrow vs. general AI

                      \n\n

                      AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

                      \n

                      Machine consciousness, sentience, and mind

                      \n\n

                      The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

                      \n

                      Consciousness

                      \n\n

                      David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

                      \n

                      Computationalism and functionalism

                      \n\n

                      Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

                      Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

                      \n

                      AI welfare and rights

                      \n

                      It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

                      In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part to society on their own.[375][376]\n

                      Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

                      \n

                      Future

                      \n

                      Superintelligence and the singularity

                      \n

                      A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

                      However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

                      \n

                      Transhumanism

                      \n\n

                      Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

                      Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

                      \n

                      In fiction

                      \n\n
                      The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
                      \n

                      Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

                      A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

                      Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

                      Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

                      \n

                      See also

                      \n\n

                      Explanatory notes

                      \n
                      \n
                        \n
                      1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                      2. \n
                      3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                      4. \n
                      5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
                      6. \n
                      7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
                      8. \n
                      9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
                      10. \n
                      11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
                      12. \n
                      13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
                      14. \n
                      15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
                      16. \n
                      17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
                      18. \n
                      19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
                      20. \n
                      21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
                      22. \n
                      23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
                      24. \n
                      25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[218]\n
                      26. \n
                      27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
                      28. \n
                      29. ^ This is the United Nations\' definition, and includes things like land mines as well.[232]\n
                      30. \n
                      31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
                      32. \n
                      33. ^ Sometimes called a "robopocalypse"[251]\n
                      34. \n
                      35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
                      36. \n
                      37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
                      38. \n
                      39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
                      40. \n
                      41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
                      42. \n
                      43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
                      44. \n
                      45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
                      46. \n
                      47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
                      48. \n
                      49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
                      50. \n
                      51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
                      52. \n
                      53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
                      54. \n
                      55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
                      56. \n
                      57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
                      58. \n
                      \n

                      References

                      \n
                      \n
                        \n
                      1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
                      2. \n
                      3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
                      4. \n
                      5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
                      6. \n
                      7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
                        Proposal for the modern version: Pennachin & Goertzel (2007)
                        Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
                        \n
                      8. \n
                      9. ^ Russell & Norvig (2021, §1.2).\n
                      10. \n
                      11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
                        The proposal: McCarthy et al. (1955)
                        \n
                      12. \n
                      13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
                      14. \n
                      15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
                      16. \n
                      17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
                      18. \n
                      19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
                      20. \n
                      21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
                      22. \n
                      23. ^ Toews (2023).\n
                      24. \n
                      25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
                      26. \n
                      27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
                      28. \n
                      29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
                      30. \n
                      31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
                      32. \n
                      33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
                      34. \n
                      35. ^ Smoliar & Zhang (1994).\n
                      36. \n
                      37. ^ Neumann & Möller (2008).\n
                      38. \n
                      39. ^ Kuperman, Reichley & Bailey (2006).\n
                      40. \n
                      41. ^ McGarry (2005).\n
                      42. \n
                      43. ^ Bertini, Del Bimbo & Torniai (2006).\n
                      44. \n
                      45. ^ Russell & Norvig (2021), pp. 272.\n
                      46. \n
                      47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
                      48. \n
                      49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
                      50. \n
                      51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
                      52. \n
                      53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
                      54. \n
                      55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
                      56. \n
                      57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
                      58. \n
                      59. ^ Newquist (1994), p. 296.\n
                      60. \n
                      61. ^ Crevier (1993), pp. 204–208.\n
                      62. \n
                      63. ^ Russell & Norvig (2021), p. 528.\n
                      64. \n
                      65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
                      66. \n
                      67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
                      68. \n
                      69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
                      70. \n
                      71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
                      72. \n
                      73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
                      74. \n
                      75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
                      76. \n
                      77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
                      78. \n
                      79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
                      80. \n
                      81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
                      82. \n
                      83. ^ Turing (1950).\n
                      84. \n
                      85. ^ Solomonoff (1956).\n
                      86. \n
                      87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
                      88. \n
                      89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
                      90. \n
                      91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
                      92. \n
                      93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
                      94. \n
                      95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
                      96. \n
                      97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
                      98. \n
                      99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
                      100. \n
                      101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
                      102. \n
                      103. ^ Russell & Norvig (2021), pp. 856–858.\n
                      104. \n
                      105. ^ Dickson (2022).\n
                      106. \n
                      107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
                      108. \n
                      109. ^ Vincent (2019).\n
                      110. \n
                      111. ^ Russell & Norvig (2021), pp. 875–878.\n
                      112. \n
                      113. ^ Bushwick (2023).\n
                      114. \n
                      115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
                      116. \n
                      117. ^ Russell & Norvig (2021), pp. 849–850.\n
                      118. \n
                      119. ^ Russell & Norvig (2021), pp. 895–899.\n
                      120. \n
                      121. ^ Russell & Norvig (2021), pp. 899–901.\n
                      122. \n
                      123. ^ Challa et al. (2011).\n
                      124. \n
                      125. ^ Russell & Norvig (2021), pp. 931–938.\n
                      126. \n
                      127. ^ MIT AIL (2014).\n
                      128. \n
                      129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
                      130. \n
                      131. ^ Waddell (2018).\n
                      132. \n
                      133. ^ Poria et al. (2017).\n
                      134. \n
                      135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
                      136. \n
                      137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
                      138. \n
                      139. ^ Russell & Norvig (2021), sect. 11.2.\n
                      140. \n
                      141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
                      142. \n
                      143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
                      144. \n
                      145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
                      146. \n
                      147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
                      148. \n
                      149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
                      150. \n
                      151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
                      152. \n
                      153. ^ Merkle & Middendorf (2013).\n
                      154. \n
                      155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
                      156. \n
                      157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
                      158. \n
                      159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
                      160. \n
                      161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
                      162. \n
                      163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
                      164. \n
                      165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
                      166. \n
                      167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
                      168. \n
                      169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
                      170. \n
                      171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
                      172. \n
                      173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
                      174. \n
                      175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
                      176. \n
                      177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
                      178. \n
                      179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
                      180. \n
                      181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
                      182. \n
                      183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ~363–379), Nilsson (1998, chpt. 19.3–19.4)\n
                      184. \n
                      185. ^ Domingos (2015), chpt. 6.\n
                      186. \n
                      187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
                      188. \n
                      189. ^ Domingos (2015), p. 210.\n
                      190. \n
                      191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
                      192. \n
                      193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
                      194. \n
                      195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
                      196. \n
                      197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
                      198. \n
                      199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
                      200. \n
                      201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
                      202. \n
                      203. ^ Domingos (2015), p. 152.\n
                      204. \n
                      205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
                      206. \n
                      207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
                      208. \n
                      209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
                      210. \n
                      211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
                      212. \n
                      213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
                      214. \n
                      215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
                      216. \n
                      217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
                      218. \n
                      219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
                      220. \n
                      221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
                      222. \n
                      223. ^ Deng & Yu (2014), pp. 199–200.\n
                      224. \n
                      225. ^ Ciresan, Meier & Schmidhuber (2012).\n
                      226. \n
                      227. ^ Russell & Norvig (2021), p. 751.\n
                      228. \n
                      229. ^ a b c Russell & Norvig (2021), p. 17.\n
                      230. \n
                      231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
                      232. \n
                      233. ^ a b Schmidhuber (2022), sect. 5.\n
                      234. \n
                      235. ^ Schmidhuber (2022), sect. 6.\n
                      236. \n
                      237. ^ a b c Schmidhuber (2022), sect. 7.\n
                      238. \n
                      239. ^ Schmidhuber (2022), sect. 8.\n
                      240. \n
                      241. ^ Quoted in Christian (2020, p. 22)\n
                      242. \n
                      243. ^ Smith (2023).\n
                      244. \n
                      245. ^ "Explained: Generative AI". 9 November 2023.\n
                      246. \n
                      247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
                      248. \n
                      249. ^ Marmouyet (2023).\n
                      250. \n
                      251. ^ Kobielus (2019).\n
                      252. \n
                      253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
                      254. \n
                      255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
                      256. \n
                      257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see 'gigantic' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                      258. \n
                      259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
                      260. \n
                      261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
                      262. \n
                      263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
                      264. \n
                      265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
                      266. \n
                      267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
                      268. \n
                      269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
                      270. \n
                      271. ^ "AI speeds up drug design for Parkinson's ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      272. \n
                      273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
                      274. \n
                      275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
                      276. \n
                      277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
                      278. \n
                      279. ^ Markoff, John (16 February 2011). "Computer Wins on 'Jeopardy!': Trivial, It's Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
                      280. \n
                      281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
                      282. \n
                      283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
                      284. \n
                      285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
                      286. \n
                      287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in 'fiendishly complex' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
                      288. \n
                      289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
                      290. \n
                      291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
                      292. \n
                      293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
                      294. \n
                      295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
                      296. \n
                      297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
                      298. \n
                      299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
                      300. \n
                      301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
                      302. \n
                      303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
                      304. \n
                      305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
                      306. \n
                      307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
                      308. \n
                      309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
                      310. \n
                      311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
                      312. \n
                      313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
                      314. \n
                      315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
                      316. \n
                      317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
                      318. \n
                      319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
                      320. \n
                      321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
                      322. \n
                      323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
                      324. \n
                      325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can't – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
                      326. \n
                      327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
                      328. \n
                      329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      330. \n
                      331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
                      332. \n
                      333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
                      334. \n
                      335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
                      336. \n
                      337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
                      338. \n
                      339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      340. \n
                      341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
                      342. \n
                      343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      344. \n
                      345. ^ Simonite (2016).\n
                      346. \n
                      347. ^ Russell & Norvig (2021), p. 987.\n
                      348. \n
                      349. ^ Laskowski (2023).\n
                      350. \n
                      351. ^ GAO (2022).\n
                      352. \n
                      353. ^ Valinsky (2019).\n
                      354. \n
                      355. ^ Russell & Norvig (2021), p. 991.\n
                      356. \n
                      357. ^ Russell & Norvig (2021), pp. 991–992.\n
                      358. \n
                      359. ^ Christian (2020), p. 63.\n
                      360. \n
                      361. ^ Vincent (2022).\n
                      362. \n
                      363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
                      364. \n
                      365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
                      366. \n
                      367. ^ Reisner (2023).\n
                      368. \n
                      369. ^ Alter & Harris (2023).\n
                      370. \n
                      371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
                      372. \n
                      373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
                      374. \n
                      375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
                      376. \n
                      377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
                      378. \n
                      379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
                      380. \n
                      381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech's Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
                      382. \n
                      383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
                      384. \n
                      385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It's only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
                      386. \n
                      387. ^ Halper, Evan; O'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
                      388. \n
                      389. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
                      390. \n
                      391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
                      392. \n
                      393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      394. \n
                      395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
                      396. \n
                      397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island's Nuclear Plant to Reopen, Help Power Microsoft's AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      398. \n
                      399. ^ Nicas (2018).\n
                      400. \n
                      401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
                      402. \n
                      403. ^ Williams (2023).\n
                      404. \n
                      405. ^ Taylor & Hern (2023).\n
                      406. \n
                      407. ^ a b Samuel, Sigal (19 April 2022). "Why it's so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
                      408. \n
                      409. ^ a b Rose (2023).\n
                      410. \n
                      411. ^ CNA (2019).\n
                      412. \n
                      413. ^ Goffrey (2008), p. 17.\n
                      414. \n
                      415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
                      416. \n
                      417. ^ Christian (2020), p. 25.\n
                      418. \n
                      419. ^ a b Russell & Norvig (2021), p. 995.\n
                      420. \n
                      421. ^ Grant & Hill (2023).\n
                      422. \n
                      423. ^ Larson & Angwin (2016).\n
                      424. \n
                      425. ^ Christian (2020), p. 67–70.\n
                      426. \n
                      427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
                      428. \n
                      429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
                      430. \n
                      431. ^ Quoted in Christian (2020, p. 65).\n
                      432. \n
                      433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
                      434. \n
                      435. ^ Quoted in Christian (2020, p. 80)\n
                      436. \n
                      437. ^ Dockrill (2022).\n
                      438. \n
                      439. ^ Sample (2017).\n
                      440. \n
                      441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
                      442. \n
                      443. ^ Christian (2020), p. 110.\n
                      444. \n
                      445. ^ Christian (2020), pp. 88–91.\n
                      446. \n
                      447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
                      448. \n
                      449. ^ Christian (2020), p. 91.\n
                      450. \n
                      451. ^ Christian (2020), p. 83.\n
                      452. \n
                      453. ^ Verma (2021).\n
                      454. \n
                      455. ^ Rothman (2020).\n
                      456. \n
                      457. ^ Christian (2020), pp. 105–108.\n
                      458. \n
                      459. ^ Christian (2020), pp. 108–112.\n
                      460. \n
                      461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI's 'Black Box'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
                      462. \n
                      463. ^ Russell & Norvig (2021), p. 989.\n
                      464. \n
                      465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
                      466. \n
                      467. ^ Russell & Norvig (2021), p. 988.\n
                      468. \n
                      469. ^ Robitzski (2018); Sainato (2015)\n
                      470. \n
                      471. ^ Harari (2018).\n
                      472. \n
                      473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
                      474. \n
                      475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
                      476. \n
                      477. ^ Urbina et al. (2022).\n
                      478. \n
                      479. ^ a b E. McGaughey, 'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
                      480. \n
                      481. ^ Ford & Colvin (2015); McGaughey (2022)\n
                      482. \n
                      483. ^ IGM Chicago (2017).\n
                      484. \n
                      485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
                      486. \n
                      487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
                      488. \n
                      489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
                      490. \n
                      491. ^ Carter, Justin (11 April 2023). "China's game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
                      492. \n
                      493. ^ Morgenstern (2015).\n
                      494. \n
                      495. ^ Mahdawi (2017); Thompson (2014)\n
                      496. \n
                      497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
                      498. \n
                      499. ^ Cellan-Jones (2014).\n
                      500. \n
                      501. ^ Russell & Norvig 2021, p. 1001.\n
                      502. \n
                      503. ^ Bostrom (2014).\n
                      504. \n
                      505. ^ Russell (2019).\n
                      506. \n
                      507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
                      508. \n
                      509. ^ Harari (2023).\n
                      510. \n
                      511. ^ Müller & Bostrom (2014).\n
                      512. \n
                      513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
                      514. \n
                      515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
                      516. \n
                      517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
                      518. \n
                      519. ^ "'50–50 chance' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
                      520. \n
                      521. ^ Valance (2023).\n
                      522. \n
                      523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, 'father of AI' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
                      524. \n
                      525. ^ Colton, Emma (7 May 2023). "'Father of AI' says tech fears misplaced: 'You cannot stop it'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                      526. \n
                      527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned 'Father Of Modern AI,' Says His Life's Work Won't Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                      528. \n
                      529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: 'Do we think the world is better off with more or less intelligence?'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
                      530. \n
                      531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
                      532. \n
                      533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
                      534. \n
                      535. ^ a b Christian (2020), pp. 67, 73.\n
                      536. \n
                      537. ^ Yudkowsky (2008).\n
                      538. \n
                      539. ^ a b Anderson & Anderson (2011).\n
                      540. \n
                      541. ^ AAAI (2014).\n
                      542. \n
                      543. ^ Wallach (2010).\n
                      544. \n
                      545. ^ Russell (2019), p. 173.\n
                      546. \n
                      547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
                      548. \n
                      549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
                      550. \n
                      551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
                      552. \n
                      553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
                      554. \n
                      555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                      556. \n
                      557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
                      558. \n
                      559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                      560. \n
                      561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                      562. \n
                      563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                      564. \n
                      565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
                      566. \n
                      567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      568. \n
                      569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      570. \n
                      571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
                      572. \n
                      573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
                      574. \n
                      575. ^ a b Vincent (2023).\n
                      576. \n
                      577. ^ Stanford University (2023).\n
                      578. \n
                      579. ^ a b c d UNESCO (2021).\n
                      580. \n
                      581. ^ Kissinger (2021).\n
                      582. \n
                      583. ^ Altman, Brockman & Sutskever (2023).\n
                      584. \n
                      585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
                      586. \n
                      587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
                      588. \n
                      589. ^ Edwards (2023).\n
                      590. \n
                      591. ^ Kasperowicz (2023).\n
                      592. \n
                      593. ^ Fox News (2023).\n
                      594. \n
                      595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
                      596. \n
                      597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
                      598. \n
                      599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
                      600. \n
                      601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
                      602. \n
                      603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
                      604. \n
                      605. ^ a b Russell & Norvig 2021, p. 9.\n
                      606. \n
                      607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
                      608. \n
                      609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                      610. \n
                      611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
                      612. \n
                      613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
                      614. \n
                      615. ^ Crevier (1993), pp. 47–49.\n
                      616. \n
                      617. ^ Russell & Norvig (2003), p. 17.\n
                      618. \n
                      619. ^ Russell & Norvig (2003), p. 18.\n
                      620. \n
                      621. ^ Newquist (1994), pp. 86–86.\n
                      622. \n
                      623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
                      624. \n
                      625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
                      626. \n
                      627. ^ Russell & Norvig (2021), p. 21.\n
                      628. \n
                      629. ^ Lighthill (1973).\n
                      630. \n
                      631. ^ NRC 1999, pp. 212–213.\n
                      632. \n
                      633. ^ Russell & Norvig (2021), p. 22.\n
                      634. \n
                      635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
                      636. \n
                      637. ^ Russell & Norvig (2021), p. 24.\n
                      638. \n
                      639. ^ Nilsson (1998), p. 7.\n
                      640. \n
                      641. ^ McCorduck (2004), pp. 454–462.\n
                      642. \n
                      643. ^ Moravec (1988).\n
                      644. \n
                      645. ^ a b Brooks (1990).\n
                      646. \n
                      647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
                      648. \n
                      649. ^ Russell & Norvig (2021), p. 25.\n
                      650. \n
                      651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
                      652. \n
                      653. ^ Russell & Norvig (2021), p. 26.\n
                      654. \n
                      655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
                      656. \n
                      657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
                      658. \n
                      659. ^ Wong (2023).\n
                      660. \n
                      661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
                      662. \n
                      663. ^ a b c Clark (2015b).\n
                      664. \n
                      665. ^ Big data: Russell & Norvig (2021, p. 26)\n
                      666. \n
                      667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
                      668. \n
                      669. ^ DiFeliciantonio (2023).\n
                      670. \n
                      671. ^ Goswami (2023).\n
                      672. \n
                      673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                      674. \n
                      675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
                      676. \n
                      677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
                      678. \n
                      679. ^ a b Turing (1950), p. 1.\n
                      680. \n
                      681. ^ Turing (1950), Under "The Argument from Consciousness".\n
                      682. \n
                      683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
                      684. \n
                      685. ^ Russell & Norvig (2021), p. 3.\n
                      686. \n
                      687. ^ Maker (2006).\n
                      688. \n
                      689. ^ McCarthy (1999).\n
                      690. \n
                      691. ^ Minsky (1986).\n
                      692. \n
                      693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
                      694. \n
                      695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
                      696. \n
                      697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
                      698. \n
                      699. ^ Nilsson (1983), p. 10.\n
                      700. \n
                      701. ^ Haugeland (1985), pp. 112–117.\n
                      702. \n
                      703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
                      704. \n
                      705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
                      706. \n
                      707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
                      708. \n
                      709. ^ Crevier (1993), p. 125.\n
                      710. \n
                      711. ^ Langley (2011).\n
                      712. \n
                      713. ^ Katz (2012).\n
                      714. \n
                      715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
                      716. \n
                      717. ^ Pennachin & Goertzel (2007).\n
                      718. \n
                      719. ^ a b Roberts (2016).\n
                      720. \n
                      721. ^ Russell & Norvig (2021), p. 986.\n
                      722. \n
                      723. ^ Chalmers (1995).\n
                      724. \n
                      725. ^ Dennett (1991).\n
                      726. \n
                      727. ^ Horst (2005).\n
                      728. \n
                      729. ^ Searle (1999).\n
                      730. \n
                      731. ^ Searle (1980), p. 1.\n
                      732. \n
                      733. ^ Russell & Norvig (2021), p. 9817.\n
                      734. \n
                      735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
                      736. \n
                      737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
                      738. \n
                      739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
                      740. \n
                      741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
                      742. \n
                      743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
                      744. \n
                      745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                      746. \n
                      747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                      748. \n
                      749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
                      750. \n
                      751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
                      752. \n
                      753. ^ Russell & Norvig (2021), p. 1005.\n
                      754. \n
                      755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
                      756. \n
                      757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
                      758. \n
                      759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
                      760. \n
                      761. ^ McCorduck (2004), pp. 340–400.\n
                      762. \n
                      763. ^ Buttazzo (2001).\n
                      764. \n
                      765. ^ Anderson (2008).\n
                      766. \n
                      767. ^ McCauley (2007).\n
                      768. \n
                      769. ^ Galvan (1997).\n
                      770. \n
                      \n

                      AI textbooks

                      \n

                      The two most widely used textbooks in 2023 (see the Open Syllabus):\n

                      \n\n

                      These were the four of the most widely used AI textbooks in 2008:\n

                      \n
                      \n\n
                      \n

                      Other textbooks:\n

                      \n\n

                      History of AI

                      \n
                      \n\n
                      \n

                      Other sources

                      \n
                      \n\n\n
                      \n

                      Further reading

                      \n
                      \n\n
                      \n
                      \n\n\n\n\n\n\n\n\n\n
                      \n
                      \n\t\t\t\t\t\n\t\t\t\t
                      \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
                      \n\t
                        \n
                        \n\n\n'}'. -[2024-10-27 22:50:32,001] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_LzPenVw46yRZhjf135NiViwn', type='function')], function_call=None) -[2024-10-27 22:50:32,001] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-10-27 22:50:32,002] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-10-27 22:50:33,830] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=AI%20trends&src=typed_query'}'. -[2024-10-27 22:50:33,831] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=AI%20trends&src=typed_query', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-10-27 22:50:33,832] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_UzjJ66l5wtI9wFUbtW58lNsf', type='function')], function_call=None) -[2024-10-27 22:50:33,833] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_2slixY8EDJc9BeSxtIE7cYNE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_LzPenVw46yRZhjf135NiViwn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_UzjJ66l5wtI9wFUbtW58lNsf', type='function')], function_call=None) - -[2024-10-27 22:50:33,833] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_2slixY8EDJc9BeSxtIE7cYNE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_LzPenVw46yRZhjf135NiViwn', type='function')], function_call=None) -- agent4: Message(content=None, 
role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_UzjJ66l5wtI9wFUbtW58lNsf', type='function')], function_call=None) - -[2024-10-27 22:50:33,833] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_2slixY8EDJc9BeSxtIE7cYNE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_LzPenVw46yRZhjf135NiViwn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_UzjJ66l5wtI9wFUbtW58lNsf', type='function')], function_call=None) - -[2024-10-27 22:50:34,114] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-27 22:50:34,114] [INFO] [Engine]: Starting iteration 2 -[2024-10-27 22:50:34,114] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_2slixY8EDJc9BeSxtIE7cYNE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_LzPenVw46yRZhjf135NiViwn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_UzjJ66l5wtI9wFUbtW58lNsf', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:50:35,080] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please gather the information provided by agents 2, 3, and 4 about the latest trends in AI and compile a summary report.', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'}, 'continue': True} -[2024-10-27 22:50:35,080] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please gather the information provided by agents 2, 3, and 4 about the latest trends in AI and compile a summary report.', 'agent2': 'Task description for agent2', 'agent3': 'Task description for agent3', 'agent4': 'Task description for agent4'} -[2024-10-27 22:50:35,080] [INFO] [Engine]: Assigning task to agent1: Please gather the information provided by agents 2, 3, and 4 about the latest trends in AI and compile a summary report. -[2024-10-27 22:50:35,080] [INFO] [BaseAgent]: Agent 'agent1' acting on task 'Please gather the information provided by agents 2, 3, and 4 about the latest trends in AI and compile a summary report.'. -[2024-10-27 22:50:42,163] [INFO] [BaseAgent]: Agent agent1 sent message to agent2: Please provide information on the latest trends in AI. -[2024-10-27 22:50:42,164] [INFO] [BaseAgent]: Agent agent2 received message from agent1: Please provide information on the latest trends in AI. -[2024-10-27 22:50:42,164] [INFO] [BaseAgent]: Agent 'agent1' called 'communicate_to' with args '{'target_agent_id': 'agent2', 'message': 'Please provide information on the latest trends in AI.'}'. 
-[2024-10-27 22:50:42,165] [INFO] [BaseAgent]: Agent 'agent1' obtained result '{'success': True, 'message': 'Successfully sent message to agent agent2', 'session_id': 'default_session'}'. -[2024-10-27 22:50:42,165] [DEBUG] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Please provide information on the latest trends in AI."}', name='communicate_to'), id='call_2ZfW0DHQ6ecc3A5KCUh60tuT', type='function')], function_call=None) -[2024-10-27 22:50:42,166] [INFO] [Engine]: Assigning task to agent2: Task description for agent2 -[2024-10-27 22:50:42,166] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Task description for agent2'. -[2024-10-27 22:50:51,738] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-10-27 22:50:51,739] [INFO] [Engine]: Assigning task to agent3: Task description for agent3 -[2024-10-27 22:50:51,739] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Task description for agent3'. -[2024-10-27 22:52:01,089] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-10-27 22:52:01,090] [INFO] [Engine]: Assigning task to agent4: Task description for agent4 -[2024-10-27 22:52:01,090] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Task description for agent4'. 
-[2024-10-27 22:52:02,944] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-10-27 22:52:02,945] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Please provide information on the latest trends in AI."}', name='communicate_to'), id='call_2ZfW0DHQ6ecc3A5KCUh60tuT', type='function')], function_call=None) - -[2024-10-27 22:52:02,946] [INFO] [Engine]: Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Please provide information on the latest trends in AI."}', name='communicate_to'), id='call_2ZfW0DHQ6ecc3A5KCUh60tuT', type='function')], function_call=None) - -[2024-10-27 22:52:02,946] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_2slixY8EDJc9BeSxtIE7cYNE', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_LzPenVw46yRZhjf135NiViwn', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=AI%20trends&src=typed_query"}', name='fetch_webpage'), id='call_UzjJ66l5wtI9wFUbtW58lNsf', type='function')], function_call=None) - -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', 
tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Please provide information on the latest trends in AI."}', name='communicate_to'), id='call_2ZfW0DHQ6ecc3A5KCUh60tuT', type='function')], function_call=None) - -[2024-10-27 22:52:03,335] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-10-27 22:52:03,336] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-10-27 22:52:03,336] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-27 22:52:03,336] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:52:03,336] [INFO] [Evaluator]: Total Token Consumption: 102 -[2024-10-27 22:52:03,336] [INFO] [Evaluator]: Average Tokens per Iteration: 51.0 -[2024-10-27 22:52:03,336] [INFO] [Engine]: Simulation completed. -[2024-10-27 22:52:06,936] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:52:06,937] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:52:06,937] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:52:06,937] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:52:06,937] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:52:06,937] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:52:06,937] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:52:06,938] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:52:06,938] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:52:06,938] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. 
-[2024-10-27 22:52:06,938] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:52:06,938] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:52:06,938] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:52:06,938] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:52:06,939] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-27 22:52:06,939] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:52:06,939] [INFO] [Engine]: Engine starting simulation. -[2024-10-27 22:52:06,939] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:52:06,939] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:52:07,865] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:52:07,866] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:52:07,866] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. -[2024-10-27 22:52:07,866] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-10-27 22:52:09,287] [INFO] [BaseAgent]: Agent agent2 sent message to agent1: Could you please search for the latest trends in AI using Google? -[2024-10-27 22:52:09,287] [INFO] [BaseAgent]: Agent agent1 received message from agent2: Could you please search for the latest trends in AI using Google? -[2024-10-27 22:52:09,288] [INFO] [BaseAgent]: Agent 'agent2' called 'communicate_to' with args '{'target_agent_id': 'agent1', 'message': 'Could you please search for the latest trends in AI using Google?'}'. -[2024-10-27 22:52:09,288] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'message': 'Successfully sent message to agent agent1', 'session_id': 'default_session'}'. 
-[2024-10-27 22:52:09,288] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Could you please search for the latest trends in AI using Google?"}', name='communicate_to'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -[2024-10-27 22:52:09,288] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:52:09,288] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-10-27 22:52:10,801] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:52:10,811] [INFO] [BaseAgent]: Agent 'agent3' obtained result '{'success': True, 'error-msg': '', 'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence', 'content': '\n\n\n\nArtificial intelligence - Wikipedia\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nJump to content\n
                        \n\t
                        \n\t\t
                        \n\t\t\t
                        \n\n\t\t\n\t\t\t\n\n\n\t\t
                        \n\t\t
                        \n\t\t\t\n\n\n\t\t\t\n\n\t\t
                        \n\t\n\n
                        \n\t
                        \n\t\t
                        \n\t\t\t
                        \n\t\t
                        \n\t\t
                        \n\t\t\t
                        \n\t\t
                        \n\t\t\t\n\t\t
                        \n\t
                        \n\t
                        \n\t\t\t\t
                        \n\t\t\n\t\t\t
                        \n\t\t
                        \n\t\t
                        \n\t\t\t
                        \n\t\t\t\t
                        \n\t\t\t\t\t\n\t\t\t\t\t

                        Artificial intelligence

                        \n\t\t\t\t\t\t\t\n
                        \n\t\n\t\n\t
                        \n\n\t\t
                        \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t
                        \n\n\t
                        \n
                        \n
                        \n\t\t\t\t
                        \n\t\t\t\t\t
                        \n\t\t\t\t\t\t
                        \n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                        \n\t\t\t\t\t\t
                        \n\t\t\t\t\t\t\t\n\t\t\t\t\n\t\t\t\t\t\t\t
                        \n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t
                        \n\t\t\t\t\t
                        \n\t\t\t\t
                        \n\t\t\t\t
                        \n\t\t\t\t\t
                        \n\t\t\t\t\t\t\n\t\t\t\t\t\t
                        \n\t\t\n\t\t\t\t\t
                        \n\t\t\t\t
                        \n\t\t\t\t
                        \n\t\t\t\t\t
                        \n\t\t\t\t\t\t\t
                        \n\t\t
                        Page semi-protected
                        \n\t\t
                        \n\n\t\t\t\t\t\t
                        From Wikipedia, the free encyclopedia
                        \n\t\t\t\t\t
                        \n\t\t\t\t\t
                        \n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t
                        \n\n

                        \n

                        \n\n\n\n\n\n\n\n

                        Artificial intelligence (AI), in its broadest sense, is intelligence exhibited by machines, particularly computer systems. It is a field of research in computer science that develops and studies methods and software that enable machines to perceive their environment and use learning and intelligence to take actions that maximize their chances of achieving defined goals.[1] Such machines may be called AIs.\n

                        Some high-profile applications of AI include advanced web search engines (e.g., Google Search); recommendation systems (used by YouTube, Amazon, and Netflix); interacting via human speech (e.g., Google Assistant, Siri, and Alexa); autonomous vehicles (e.g., Waymo); generative and creative tools (e.g., ChatGPT, and AI art); and superhuman play and analysis in strategy games (e.g., chess and Go). However, many AI applications are not perceived as AI: "A lot of cutting edge AI has filtered into general applications, often without being called AI because once something becomes useful enough and common enough it\'s not labeled AI anymore."[2][3]\n

                        The various subfields of AI research are centered around particular goals and the use of particular tools. The traditional goals of AI research include reasoning, knowledge representation, planning, learning, natural language processing, perception, and support for robotics.[a] General intelligence—the ability to complete any task performable by a human on an at least equal level—is among the field\'s long-term goals.[4] To reach these goals, AI researchers have adapted and integrated a wide range of techniques, including search and mathematical optimization, formal logic, artificial neural networks, and methods based on statistics, operations research, and economics.[b] AI also draws upon psychology, linguistics, philosophy, neuroscience, and other fields.[5]\n

                        Artificial intelligence was founded as an academic discipline in 1956,[6] and the field went through multiple cycles of optimism,[7][8] followed by periods of disappointment and loss of funding, known as AI winter.[9][10] Funding and interest vastly increased after 2012 when deep learning outperformed previous AI techniques.[11] This growth accelerated further after 2017 with the transformer architecture,[12] and by the early 2020s hundreds of billions of dollars were being invested in AI (known as the "AI boom"). The widespread use of AI in the 21st century exposed several unintended consequences and harms in the present and raised concerns about its risks and long-term effects in the future, prompting discussions about regulatory policies to ensure the safety and benefits of the technology.\n

                        \n\n

                        Goals

                        \n

                        The general problem of simulating (or creating) intelligence has been broken into subproblems. These consist of particular traits or capabilities that researchers expect an intelligent system to display. The traits described below have received the most attention and cover the scope of AI research.[a]\n

                        \n

                        Reasoning and problem-solving

                        \n

                        Early researchers developed algorithms that imitated step-by-step reasoning that humans use when they solve puzzles or make logical deductions.[13] By the late 1980s and 1990s, methods were developed for dealing with uncertain or incomplete information, employing concepts from probability and economics.[14]\n

                        Many of these algorithms are insufficient for solving large reasoning problems because they experience a "combinatorial explosion": They become exponentially slower as the problems grow.[15] Even humans rarely use the step-by-step deduction that early AI research could model. They solve most of their problems using fast, intuitive judgments.[16] Accurate and efficient reasoning is an unsolved problem.\n

                        \n

                        Knowledge representation

                        \n
                        An ontology represents knowledge as a set of concepts within a domain and the relationships between those concepts.
                        \n

                        Knowledge representation and knowledge engineering[17] allow AI programs to answer questions intelligently and make deductions about real-world facts. Formal knowledge representations are used in content-based indexing and retrieval,[18] scene interpretation,[19] clinical decision support,[20] knowledge discovery (mining "interesting" and actionable inferences from large databases),[21] and other areas.[22]\n

                        A knowledge base is a body of knowledge represented in a form that can be used by a program. An ontology is the set of objects, relations, concepts, and properties used by a particular domain of knowledge.[23] Knowledge bases need to represent things such as objects, properties, categories, and relations between objects;[24] situations, events, states, and time;[25] causes and effects;[26] knowledge about knowledge (what we know about what other people know);[27] default reasoning (things that humans assume are true until they are told differently and will remain true even when other facts are changing);[28] and many other aspects and domains of knowledge.\n

                        Among the most difficult problems in knowledge representation are the breadth of commonsense knowledge (the set of atomic facts that the average person knows is enormous);[29] and the sub-symbolic form of most commonsense knowledge (much of what people know is not represented as "facts" or "statements" that they could express verbally).[16] There is also the difficulty of knowledge acquisition, the problem of obtaining knowledge for AI applications.[c]\n

                        \n

                        Planning and decision-making

                        \n

                        An "agent" is anything that perceives and takes actions in the world. A rational agent has goals or preferences and takes actions to make them happen.[d][32] In automated planning, the agent has a specific goal.[33] In automated decision-making, the agent has preferences—there are some situations it would prefer to be in, and some situations it is trying to avoid. The decision-making agent assigns a number to each situation (called the "utility") that measures how much the agent prefers it. For each possible action, it can calculate the "expected utility": the utility of all possible outcomes of the action, weighted by the probability that the outcome will occur. It can then choose the action with the maximum expected utility.[34]\n

                        In classical planning, the agent knows exactly what the effect of any action will be.[35] In most real-world problems, however, the agent may not be certain about the situation they are in (it is "unknown" or "unobservable") and it may not know for certain what will happen after each possible action (it is not "deterministic"). It must choose an action by making a probabilistic guess and then reassess the situation to see if the action worked.[36]\n

                        In some problems, the agent\'s preferences may be uncertain, especially if there are other agents or humans involved. These can be learned (e.g., with inverse reinforcement learning), or the agent can seek information to improve its preferences.[37] Information value theory can be used to weigh the value of exploratory or experimental actions.[38] The space of possible future actions and situations is typically intractably large, so the agents must take actions and evaluate situations while being uncertain of what the outcome will be.\n

                        A Markov decision process has a transition model that describes the probability that a particular action will change the state in a particular way and a reward function that supplies the utility of each state and the cost of each action. A policy associates a decision with each possible state. The policy could be calculated (e.g., by iteration), be heuristic, or it can be learned.[39]\n

                        Game theory describes the rational behavior of multiple interacting agents and is used in AI programs that make decisions that involve other agents.[40]\n

                        \n

                        Learning

                        \n

                        Machine learning is the study of programs that can improve their performance on a given task automatically.[41] It has been a part of AI from the beginning.[e]\n

                        There are several kinds of machine learning. Unsupervised learning analyzes a stream of data and finds patterns and makes predictions without any other guidance.[44] Supervised learning requires a human to label the input data first, and comes in two main varieties: classification (where the program must learn to predict what category the input belongs in) and regression (where the program must deduce a numeric function based on numeric input).[45]\n

                        In reinforcement learning, the agent is rewarded for good responses and punished for bad ones. The agent learns to choose responses that are classified as "good".[46] Transfer learning is when the knowledge gained from one problem is applied to a new problem.[47] Deep learning is a type of machine learning that runs inputs through biologically inspired artificial neural networks for all of these types of learning.[48]\n

                        Computational learning theory can assess learners by computational complexity, by sample complexity (how much data is required), or by other notions of optimization.[49]\n

                        \n
                        \n

                        Natural language processing

                        \n

                        Natural language processing (NLP)[50] allows programs to read, write and communicate in human languages such as English. Specific problems include speech recognition, speech synthesis, machine translation, information extraction, information retrieval and question answering.[51]\n

                        Early work, based on Noam Chomsky\'s generative grammar and semantic networks, had difficulty with word-sense disambiguation[f] unless restricted to small domains called "micro-worlds" (due to the common sense knowledge problem[29]). Margaret Masterman believed that it was meaning and not grammar that was the key to understanding languages, and that thesauri and not dictionaries should be the basis of computational language structure.\n

                        Modern deep learning techniques for NLP include word embedding (representing words, typically as vectors encoding their meaning),[52] transformers (a deep learning architecture using an attention mechanism),[53] and others.[54] In 2019, generative pre-trained transformer (or "GPT") language models began to generate coherent text,[55][56] and by 2023, these models were able to get human-level scores on the bar exam, SAT test, GRE test, and many other real-world applications.[57]\n

                        \n

                        Perception

                        \n

                        Machine perception is the ability to use input from sensors (such as cameras, microphones, wireless signals, active lidar, sonar, radar, and tactile sensors) to deduce aspects of the world. Computer vision is the ability to analyze visual input.[58]\n

                        The field includes speech recognition,[59] image classification,[60] facial recognition, object recognition,[61]object tracking,[62] and robotic perception.[63]\n

                        \n

                        Social intelligence

                        \n
                        Kismet, a robot head which was made in the 1990s; a machine that can recognize and simulate emotions[64]
                        \n

                        Affective computing is an interdisciplinary umbrella that comprises systems that recognize, interpret, process, or simulate human feeling, emotion, and mood.[65] For example, some virtual assistants are programmed to speak conversationally or even to banter humorously; it makes them appear more sensitive to the emotional dynamics of human interaction, or to otherwise facilitate human–computer interaction.\n

                        However, this tends to give naïve users an unrealistic conception of the intelligence of existing computer agents.[66] Moderate successes related to affective computing include textual sentiment analysis and, more recently, multimodal sentiment analysis, wherein AI classifies the affects displayed by a videotaped subject.[67]\n

                        \n

                        General intelligence

                        \n

                        A machine with artificial general intelligence should be able to solve a wide variety of problems with breadth and versatility similar to human intelligence.[4]\n

                        \n

                        Techniques

                        \n

                        AI research uses a wide variety of techniques to accomplish the goals above.[b]\n

                        \n

                        Search and optimization

                        \n

                        AI can solve many problems by intelligently searching through many possible solutions.[68] There are two very different kinds of search used in AI: state space search and local search.\n

                        \n
                        \n

                        State space search searches through a tree of possible states to try to find a goal state.[69] For example, planning algorithms search through trees of goals and subgoals, attempting to find a path to a target goal, a process called means-ends analysis.[70]\n

                        Simple exhaustive searches[71] are rarely sufficient for most real-world problems: the search space (the number of places to search) quickly grows to astronomical numbers. The result is a search that is too slow or never completes.[15] "Heuristics" or "rules of thumb" can help prioritize choices that are more likely to reach a goal.[72]\n

                        Adversarial search is used for game-playing programs, such as chess or Go. It searches through a tree of possible moves and counter-moves, looking for a winning position.[73]\n

                        \n
                        \n
                        Illustration of gradient descent for 3 different starting points; two parameters (represented by the plan coordinates) are adjusted in order to minimize the loss function (the height)

                        Local search uses mathematical optimization to find a solution to a problem. It begins with some form of guess and refines it incrementally.[74]\n

                        Gradient descent is a type of local search that optimizes a set of numerical parameters by incrementally adjusting them to minimize a loss function. Variants of gradient descent are commonly used to train neural networks.[75]\n

                        Another type of local search is evolutionary computation, which aims to iteratively improve a set of candidate solutions by "mutating" and "recombining" them, selecting only the fittest to survive each generation.[76]\n

                        Distributed search processes can coordinate via swarm intelligence algorithms. Two popular swarm algorithms used in search are particle swarm optimization (inspired by bird flocking) and ant colony optimization (inspired by ant trails).[77]\n

                        \n

                        Logic

                        \n

                        Formal logic is used for reasoning and knowledge representation.[78]\nFormal logic comes in two main forms: propositional logic (which operates on statements that are true or false and uses logical connectives such as "and", "or", "not" and "implies")[79] and predicate logic (which also operates on objects, predicates and relations and uses quantifiers such as "Every X is a Y" and "There are some Xs that are Ys").[80]\n

                        Deductive reasoning in logic is the process of proving a new statement (conclusion) from other statements that are given and assumed to be true (the premises).[81] Proofs can be structured as proof trees, in which nodes are labelled by sentences, and children nodes are connected to parent nodes by inference rules.\n

                        Given a problem and a set of premises, problem-solving reduces to searching for a proof tree whose root node is labelled by a solution of the problem and whose leaf nodes are labelled by premises or axioms. In the case of Horn clauses, problem-solving search can be performed by reasoning forwards from the premises or backwards from the problem.[82] In the more general case of the clausal form of first-order logic, resolution is a single, axiom-free rule of inference, in which a problem is solved by proving a contradiction from premises that include the negation of the problem to be solved.[83]\n

                        Inference in both Horn clause logic and first-order logic is undecidable, and therefore intractable. However, backward reasoning with Horn clauses, which underpins computation in the logic programming language Prolog, is Turing complete. Moreover, its efficiency is competitive with computation in other symbolic programming languages.[84]\n

                        Fuzzy logic assigns a "degree of truth" between 0 and 1. It can therefore handle propositions that are vague and partially true.[85]\n

                        Non-monotonic logics, including logic programming with negation as failure, are designed to handle default reasoning.[28] Other specialized versions of logic have been developed to describe many complex domains.\n

                        \n

                        Probabilistic methods for uncertain reasoning

                        \n
                        A simple Bayesian network, with the associated conditional probability tables
                        \n

                        Many problems in AI (including in reasoning, planning, learning, perception, and robotics) require the agent to operate with incomplete or uncertain information. AI researchers have devised a number of tools to solve these problems using methods from probability theory and economics.[86] Precise mathematical tools have been developed that analyze how an agent can make choices and plan, using decision theory, decision analysis,[87] and information value theory.[88] These tools include models such as Markov decision processes,[89] dynamic decision networks,[90] game theory and mechanism design.[91]\n

                        Bayesian networks[92] are a tool that can be used for reasoning (using the Bayesian inference algorithm),[g][94] learning (using the expectation–maximization algorithm),[h][96] planning (using decision networks)[97] and perception (using dynamic Bayesian networks).[90]\n

                        Probabilistic algorithms can also be used for filtering, prediction, smoothing, and finding explanations for streams of data, thus helping perception systems analyze processes that occur over time (e.g., hidden Markov models or Kalman filters).[90]\n

                        \n
                        Expectation–maximization clustering of Old Faithful eruption data starts from a random guess but then successfully converges on an accurate clustering of the two physically distinct modes of eruption.
                        \n

                        Classifiers and statistical learning methods

                        \n

                        The simplest AI applications can be divided into two types: classifiers (e.g., "if shiny then diamond"), on one hand, and controllers (e.g., "if diamond then pick up"), on the other hand. Classifiers[98] are functions that use pattern matching to determine the closest match. They can be fine-tuned based on chosen examples using supervised learning. Each pattern (also called an "observation") is labeled with a certain predefined class. All the observations combined with their class labels are known as a data set. When a new observation is received, that observation is classified based on previous experience.[45]\n

                        There are many kinds of classifiers in use.[99] The decision tree is the simplest and most widely used symbolic machine learning algorithm.[100] K-nearest neighbor algorithm was the most widely used analogical AI until the mid-1990s, and Kernel methods such as the support vector machine (SVM) displaced k-nearest neighbor in the 1990s.[101]\nThe naive Bayes classifier is reportedly the "most widely used learner"[102] at Google, due in part to its scalability.[103]\nNeural networks are also used as classifiers.[104]\n

                        \n

                        Artificial neural networks

                        \n
                        A neural network is an interconnected group of nodes, akin to the vast network of neurons in the human brain.
                        \n

                        An artificial neural network is based on a collection of nodes also known as artificial neurons, which loosely model the neurons in a biological brain. It is trained to recognise patterns; once trained, it can recognise those patterns in fresh data. There is an input, at least one hidden layer of nodes and an output. Each node applies a function and once the weight crosses its specified threshold, the data is transmitted to the next layer. A network is typically called a deep neural network if it has at least 2 hidden layers.[104]\n

                        Learning algorithms for neural networks use local search to choose the weights that will get the right output for each input during training. The most common training technique is the backpropagation algorithm.[105] Neural networks learn to model complex relationships between inputs and outputs and find patterns in data. In theory, a neural network can learn any function.[106]\n

                        In feedforward neural networks the signal passes in only one direction.[107] Recurrent neural networks feed the output signal back into the input, which allows short-term memories of previous input events. Long short term memory is the most successful network architecture for recurrent networks.[108] Perceptrons[109] use only a single layer of neurons; deep learning[110] uses multiple layers. Convolutional neural networks strengthen the connection between neurons that are "close" to each other—this is especially important in image processing, where a local set of neurons must identify an "edge" before the network can identify an object.[111]\n

                        \n
                        \n

                        Deep learning

                        \n
                        \n

                        Deep learning[110] uses several layers of neurons between the network\'s inputs and outputs. The multiple layers can progressively extract higher-level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits, letters, or faces.[112]\n

                        Deep learning has profoundly improved the performance of programs in many important subfields of artificial intelligence, including computer vision, speech recognition, natural language processing, image classification,[113] and others. The reason that deep learning performs so well in so many applications is not known as of 2023.[114] The sudden success of deep learning in 2012–2015 did not occur because of some new discovery or theoretical breakthrough (deep neural networks and backpropagation had been described by many people, as far back as the 1950s)[i] but because of two factors: the incredible increase in computer power (including the hundred-fold increase in speed by switching to GPUs) and the availability of vast amounts of training data, especially the giant curated datasets used for benchmark testing, such as ImageNet.[j]\n

                        \n

                        GPT

                        \n

                        Generative pre-trained transformers (GPT) are large language models (LLMs) that generate text based on the semantic relationships between words in sentences. Text-based GPT models are pretrained on a large corpus of text that can be from the Internet. The pretraining consists of predicting the next token (a token being usually a word, subword, or punctuation). Throughout this pretraining, GPT models accumulate knowledge about the world and can then generate human-like text by repeatedly predicting the next token. Typically, a subsequent training phase makes the model more truthful, useful, and harmless, usually with a technique called reinforcement learning from human feedback (RLHF). Current GPT models are prone to generating falsehoods called "hallucinations", although this can be reduced with RLHF and quality data. They are used in chatbots, which allow people to ask a question or request a task in simple text.[122][123]\n

                        Current models and services include Gemini (formerly Bard), ChatGPT, Grok, Claude, Copilot, and LLaMA.[124] Multimodal GPT models can process different types of data (modalities) such as images, videos, sound, and text.[125]\n

                        \n

                        Hardware and software

                        \n\n

                        In the late 2010s, graphics processing units (GPUs) that were increasingly designed with AI-specific enhancements and used with specialized TensorFlow software had replaced previously used central processing unit (CPUs) as the dominant means for large-scale (commercial and academic) machine learning models\' training.[126] Specialized programming languages such as Prolog were used in early AI research,[127] but general-purpose programming languages like Python have become predominant.[128]\n

                        The transistor density in integrated circuits has been observed to roughly double every 18 months—a trend known as Moore\'s law, named after the Intel co-founder Gordon Moore, who first identified it. Improvements in GPUs have been even faster.[129]\n

                        \n

                        Applications

                        \n

                        AI and machine learning technology is used in most of the essential applications of the 2020s, including: search engines (such as Google Search), targeting online advertisements, recommendation systems (offered by Netflix, YouTube or Amazon), driving internet traffic, targeted advertising (AdSense, Facebook), virtual assistants (such as Siri or Alexa), autonomous vehicles (including drones, ADAS and self-driving cars), automatic language translation (Microsoft Translator, Google Translate), facial recognition (Apple\'s Face ID or Microsoft\'s DeepFace and Google\'s FaceNet) and image labeling (used by Facebook, Apple\'s iPhoto and TikTok). The deployment of AI may be overseen by a Chief automation officer (CAO).\n

                        Health and medicine

                        \n\n

                        The application of AI in medicine and medical research has the potential to increase patient care and quality of life.[130] Through the lens of the Hippocratic Oath, medical professionals are ethically compelled to use AI, if applications can more accurately diagnose and treat patients.[131][132]\n

                        For medical research, AI is an important tool for processing and integrating big data. This is particularly important for organoid and tissue engineering development which use microscopy imaging as a key technique in fabrication.[133] It has been suggested that AI can overcome discrepancies in funding allocated to different fields of research.[133] New AI tools can deepen the understanding of biomedically relevant pathways. For example, AlphaFold 2 (2021) demonstrated the ability to approximate, in hours rather than months, the 3D structure of a protein.[134] In 2023, it was reported that AI-guided drug discovery helped find a class of antibiotics capable of killing two different types of drug-resistant bacteria.[135] In 2024, researchers used machine learning to accelerate the search for Parkinson\'s disease drug treatments. Their aim was to identify compounds that block the clumping, or aggregation, of alpha-synuclein (the protein that characterises Parkinson\'s disease). They were able to speed up the initial screening process ten-fold and reduce the cost by a thousand-fold.[136][137]\n

                        \n

                        Games

                        \n\n

                        Game playing programs have been used since the 1950s to demonstrate and test AI\'s most advanced techniques.[138] Deep Blue became the first computer chess-playing system to beat a reigning world chess champion, Garry Kasparov, on 11 May 1997.[139] In 2011, in a Jeopardy! quiz show exhibition match, IBM\'s question answering system, Watson, defeated the two greatest Jeopardy! champions, Brad Rutter and Ken Jennings, by a significant margin.[140] In March 2016, AlphaGo won 4 out of 5 games of Go in a match with Go champion Lee Sedol, becoming the first computer Go-playing system to beat a professional Go player without handicaps. Then, in 2017, it defeated Ke Jie, who was the best Go player in the world.[141] Other programs handle imperfect-information games, such as the poker-playing program Pluribus.[142] DeepMind developed increasingly generalistic reinforcement learning models, such as with MuZero, which could be trained to play chess, Go, or Atari games.[143] In 2019, DeepMind\'s AlphaStar achieved grandmaster level in StarCraft II, a particularly challenging real-time strategy game that involves incomplete knowledge of what happens on the map.[144] In 2021, an AI agent competed in a PlayStation Gran Turismo competition, winning against four of the world\'s best Gran Turismo drivers using deep reinforcement learning.[145] In 2024, Google DeepMind introduced SIMA, a type of AI capable of autonomously playing nine previously unseen open-world video games by observing screen output, as well as executing short, specific tasks in response to natural language instructions.[146]\n

                        \n

                        Mathematics

                        \n

                        In mathematics, special forms of formal step-by-step reasoning are used. In contrast, LLMs such as GPT-4 Turbo, Gemini Ultra, Claude Opus, LLaMa-2 or Mistral Large are working with probabilistic models, which can produce wrong answers in the form of hallucinations. Therefore, they need not only a large database of mathematical problems to learn from but also methods such as supervised fine-tuning or trained classifiers with human-annotated data to improve answers for new problems and learn from corrections.[147] A 2024 study showed that the performance of some language models for reasoning capabilities in solving math problems not included in their training data was low, even for problems with only minor deviations from trained data.[148]\n

                        Alternatively, dedicated models for mathematic problem solving with higher precision for the outcome including proof of theorems have been developed such as Alpha Tensor, Alpha Geometry and Alpha Proof all from Google DeepMind,[149] Llemma from EleutherAI[150] or Julius.[151]\n

                        When natural language is used to describe mathematical problems, converters transform such prompts into a formal language such as Lean to define mathematic tasks.\n

                        Some models have been developed to solve challenging problems and reach good results in benchmark tests, others to serve as educational tools in mathematics.[152]\n

                        \n

                        Finance

                        \n

                        Finance is one of the fastest growing sectors where applied AI tools are being deployed: from retail online banking to investment advice and insurance, where automated "robot advisers" have been in use for some years.[153]\n

                        World Pensions experts like Nicolas Firzli insist it may be too early to see the emergence of highly innovative AI-informed financial products and services: "the deployment of AI tools will simply further automatise things: destroying tens of thousands of jobs in banking, financial planning, and pension advice in the process, but I\'m not sure it will unleash a new wave of [e.g., sophisticated] pension innovation."[154]\n

                        \n

                        Military

                        \n\n

                        Various countries are deploying AI military applications.[155] The main applications enhance command and control, communications, sensors, integration and interoperability.[156] Research is targeting intelligence collection and analysis, logistics, cyber operations, information operations, and semiautonomous and autonomous vehicles.[155] AI technologies enable coordination of sensors and effectors, threat detection and identification, marking of enemy positions, target acquisition, coordination and deconfliction of distributed Joint Fires between networked combat vehicles involving manned and unmanned teams.[156] AI was incorporated into military operations in Iraq and Syria.[155]\n

                        In November 2023, US Vice President Kamala Harris disclosed a declaration signed by 31 nations to set guardrails for the military use of AI. The commitments include using legal reviews to ensure the compliance of military AI with international laws, and being cautious and transparent in the development of this technology.[157]\n

                        \n

                        Generative AI

                        \n\n
                        Vincent van Gogh in watercolour created by generative AI software
                        \n

                        In the early 2020s, generative AI gained widespread prominence. GenAI is AI capable of generating text, images, videos, or other data using generative models,[158][159] often in response to prompts.[160][161]\n

                        In March 2023, 58% of U.S. adults had heard about ChatGPT and 14% had tried it.[162] The increasing realism and ease-of-use of AI-based text-to-image generators such as Midjourney, DALL-E, and Stable Diffusion sparked a trend of viral AI-generated photos. Widespread attention was gained by a fake photo of Pope Francis wearing a white puffer coat, the fictional arrest of Donald Trump, and a hoax of an attack on the Pentagon, as well as the usage in professional creative arts.[163][164]\n

                        \n

                        Agents

                        \n

                        Artificial intelligence (AI) agents are software entities designed to perceive their environment, make decisions, and take actions autonomously to achieve specific goals. These agents can interact with users, their environment, or other agents. AI agents are used in various applications, including virtual assistants, chatbots, autonomous vehicles, game-playing systems, and industrial robotics. AI agents operate within the constraints of their programming, available computational resources, and hardware limitations. This means they are restricted to performing tasks within their defined scope and have finite memory and processing capabilities. In real-world applications, AI agents often face time constraints for decision-making and action execution. Many AI agents incorporate learning algorithms, enabling them to improve their performance over time through experience or training. Using machine learning, AI agents can adapt to new situations and optimise their behaviour for their designated tasks.[165][166][167]\n

                        \n

                        Other industry-specific tasks

                        \n

                        There are also thousands of successful AI applications used to solve specific problems for specific industries or institutions. In a 2017 survey, one in five companies reported having incorporated "AI" in some offerings or processes.[168] A few examples are energy storage, medical diagnosis, military logistics, applications that predict the result of judicial decisions, foreign policy, or supply chain management.\n

                        AI applications for evacuation and disaster management are growing. AI has been used to investigate if and how people evacuated in large scale and small scale evacuations using historical data from GPS, videos or social media. Further, AI can provide real time information on the real time evacuation conditions.[169][170][171]\n

                        In agriculture, AI has helped farmers identify areas that need irrigation, fertilization, pesticide treatments or increasing yield. Agronomists use AI to conduct research and development. AI has been used to predict the ripening time for crops such as tomatoes, monitor soil moisture, operate agricultural robots, conduct predictive analytics, classify livestock pig call emotions, automate greenhouses, detect diseases and pests, and save water.\n

                        Artificial intelligence is used in astronomy to analyze increasing amounts of available data and applications, mainly for "classification, regression, clustering, forecasting, generation, discovery, and the development of new scientific insights." For example, it is used for discovering exoplanets, forecasting solar activity, and distinguishing between signals and instrumental effects in gravitational wave astronomy. Additionally, it could be used for activities in space, such as space exploration, including the analysis of data from space missions, real-time science decisions of spacecraft, space debris avoidance, and more autonomous operation.\n

                        \n

                        Ethics

                        \n\n

                        AI has potential benefits and potential risks.[172] AI may be able to advance science and find solutions for serious problems: Demis Hassabis of Deep Mind hopes to "solve intelligence, and then use that to solve everything else".[173] However, as the use of AI has become widespread, several unintended consequences and risks have been identified.[174] In-production systems can sometimes not factor ethics and bias into their AI training processes, especially when the AI algorithms are inherently unexplainable in deep learning.[175]\n

                        \n

                        Risks and harm

                        \n
                        \n\n

                        Machine learning algorithms require large amounts of data. The techniques used to acquire this data have raised concerns about privacy, surveillance and copyright.\n

                        AI-powered devices and services, such as virtual assistants and IoT products, continuously collect personal information, raising concerns about intrusive data gathering and unauthorized access by third parties. The loss of privacy is further exacerbated by AI\'s ability to process and combine vast amounts of data, potentially leading to a surveillance society where individual activities are constantly monitored and analyzed without adequate safeguards or transparency.\n

                        Sensitive user data collected may include online activity records, geolocation data, video or audio.[176] For example, in order to build speech recognition algorithms, Amazon has recorded millions of private conversations and allowed temporary workers to listen to and transcribe some of them.[177] Opinions about this widespread surveillance range from those who see it as a necessary evil to those for whom it is clearly unethical and a violation of the right to privacy.[178]\n

                        AI developers argue that this is the only way to deliver valuable applications, and have developed several techniques that attempt to preserve privacy while still obtaining the data, such as data aggregation, de-identification and differential privacy.[179] Since 2016, some privacy experts, such as Cynthia Dwork, have begun to view privacy in terms of fairness. Brian Christian wrote that experts have pivoted "from the question of \'what they know\' to the question of \'what they\'re doing with it\'."[180]\n

                        Generative AI is often trained on unlicensed copyrighted works, including in domains such as images or computer code; the output is then used under the rationale of "fair use". Experts disagree about how well and under what circumstances this rationale will hold up in courts of law; relevant factors may include "the purpose and character of the use of the copyrighted work" and "the effect upon the potential market for the copyrighted work".[181][182] Website owners who do not wish to have their content scraped can indicate it in a "robots.txt" file.[183] In 2023, leading authors (including John Grisham and Jonathan Franzen) sued AI companies for using their work to train generative AI.[184][185] Another discussed approach is to envision a separate sui generis system of protection for creations generated by AI to ensure fair attribution and compensation for human authors.[186]\n

                        \n

                        Dominance by tech giants

                        \n

                        The commercial AI scene is dominated by Big Tech companies such as Alphabet Inc., Amazon, Apple Inc., Meta Platforms, and Microsoft.[187][188][189] Some of these players already own the vast majority of existing cloud infrastructure and computing power from data centers, allowing them to entrench further in the marketplace.[190][191]\n

                        \n

                        Substantial power needs and other environmental impacts

                        \n\n

                        In January 2024, the International Energy Agency (IEA) released Electricity 2024, Analysis and Forecast to 2026, forecasting electric power use.[192] This is the first IEA report to make projections for data centers and power consumption for artificial intelligence and cryptocurrency. The report states that power demand for these uses might double by 2026, with additional electric power usage equal to electricity used by the whole Japanese nation.[193]\n

                        Prodigious power consumption by AI is responsible for the growth of fossil fuels use, and might delay closings of obsolete, carbon-emitting coal energy facilities. There is a feverish rise in the construction of data centers throughout the US, making large technology firms (e.g., Microsoft, Meta, Google, Amazon) into voracious consumers of electric power. Projected electric consumption is so immense that there is concern that it will be fulfilled no matter the source. A ChatGPT search involves the use of 10 times the electrical energy as a Google search. The large firms are in haste to find power sources – from nuclear energy to geothermal to fusion. The tech firms argue that – in the long view – AI will be eventually kinder to the environment, but they need the energy now. AI makes the power grid more efficient and "intelligent", will assist in the growth of nuclear power, and track overall carbon emissions, according to technology firms.[194]\n

                        A 2024 Goldman Sachs Research Paper, AI Data Centers and the Coming US Power Demand Surge, found "US power demand (is) likely to experience growth not seen in a generation...." and forecasts that, by 2030, US data centers will consume 8% of US power, as opposed to 3% in 2022, presaging growth for the electrical power generation industry by a variety of means.[195] Data centers\' need for more and more electrical power is such that they might max out the electrical grid. The Big Tech companies counter that AI can be used to maximize the utilization of the grid by all.[196]\n

                        In 2024, the Wall Street Journal reported that big AI companies have begun negotiations with the US nuclear power providers to provide electricity to the data centers. In March 2024 Amazon purchased a Pennsylvania nuclear-powered data center for $650 Million (US).[197]\n

                        In September 2024, Microsoft announced an agreement with Constellation Energy to re-open the Three Mile Island nuclear power plant to provide Microsoft with 100% of all electric power produced by the plant for 20 years. Reopening the plant, which suffered a partial nuclear meltdown of its Unit 2 reactor in 1979, will require Constellation to get through strict regulatory processes which will include extensive safety scrutiny from the US Nuclear Regulatory Commission. If approved (this will be the first ever US re-commissioning of a nuclear plant), over 835 megawatts of power – enough for 800,000 homes – will be produced. The cost for re-opening and upgrading is estimated at $1.6 billion (US) and is dependent on tax breaks for nuclear power contained in the 2022 US Inflation Reduction Act.[198] The US government and the state of Michigan are investing almost $2 billion (US) to reopen the Palisades Nuclear reactor on Lake Michigan. Closed since 2022, the plant is planned to be reopened in October 2025. The Three Mile Island facility will be renamed the Crane Clean Energy Center after Chris Crane, a nuclear proponent and former CEO of Exelon who was responsible for Exelon spinoff of Constellation.[199]\n

                        \n

                        Misinformation

                        \n\n

                        YouTube, Facebook and others use recommender systems to guide users to more content. These AI programs were given the goal of maximizing user engagement (that is, the only goal was to keep people watching). The AI learned that users tended to choose misinformation, conspiracy theories, and extreme partisan content, and, to keep them watching, the AI recommended more of it. Users also tended to watch more content on the same subject, so the AI led people into filter bubbles where they received multiple versions of the same misinformation.[200] This convinced many users that the misinformation was true, and ultimately undermined trust in institutions, the media and the government.[201] The AI program had correctly learned to maximize its goal, but the result was harmful to society. After the U.S. election in 2016, major technology companies took steps to mitigate the problem [citation needed].\n

                        In 2022, generative AI began to create images, audio, video and text that are indistinguishable from real photographs, recordings, films, or human writing. It is possible for bad actors to use this technology to create massive amounts of misinformation or propaganda.[202] AI pioneer Geoffrey Hinton expressed concern about AI enabling "authoritarian leaders to manipulate their electorates" on a large scale, among other risks.[203]\n

                        \n

                        Algorithmic bias and fairness

                        \n\n

                        Machine learning applications will be biased[k] if they learn from biased data.[205] The developers may not be aware that the bias exists.[206] Bias can be introduced by the way training data is selected and by the way a model is deployed.[207][205] If a biased algorithm is used to make decisions that can seriously harm people (as it can in medicine, finance, recruitment, housing or policing) then the algorithm may cause discrimination.[208] The field of fairness studies how to prevent harms from algorithmic biases.\n

                        On June 28, 2015, Google Photos\'s new image labeling feature mistakenly identified Jacky Alcine and a friend as "gorillas" because they were black. The system was trained on a dataset that contained very few images of black people,[209] a problem called "sample size disparity".[210] Google "fixed" this problem by preventing the system from labelling anything as a "gorilla". Eight years later, in 2023, Google Photos still could not identify a gorilla, and neither could similar products from Apple, Facebook, Microsoft and Amazon.[211]\n

                        COMPAS is a commercial program widely used by U.S. courts to assess the likelihood of a defendant becoming a recidivist. In 2016, Julia Angwin at ProPublica discovered that COMPAS exhibited racial bias, despite the fact that the program was not told the races of the defendants. Although the error rate for both whites and blacks was calibrated equal at exactly 61%, the errors for each race were different—the system consistently overestimated the chance that a black person would re-offend and would underestimate the chance that a white person would not re-offend.[212] In 2017, several researchers[l] showed that it was mathematically impossible for COMPAS to accommodate all possible measures of fairness when the base rates of re-offense were different for whites and blacks in the data.[214]\n

                        A program can make biased decisions even if the data does not explicitly mention a problematic feature (such as "race" or "gender"). The feature will correlate with other features (like "address", "shopping history" or "first name"), and the program will make the same decisions based on these features as it would on "race" or "gender".[215] Moritz Hardt said "the most robust fact in this research area is that fairness through blindness doesn\'t work."[216]\n

                        Criticism of COMPAS highlighted that machine learning models are designed to make "predictions" that are only valid if we assume that the future will resemble the past. If they are trained on data that includes the results of racist decisions in the past, machine learning models must predict that racist decisions will be made in the future. If an application then uses these predictions as recommendations, some of these "recommendations" will likely be racist.[217] Thus, machine learning is not well suited to help make decisions in areas where there is hope that the future will be better than the past. It is descriptive rather than prescriptive.[m]\n

                        Bias and unfairness may go undetected because the developers are overwhelmingly white and male: among AI engineers, about 4% are black and 20% are women.[210]\n

                        There are various conflicting definitions and mathematical models of fairness. These notions depend on ethical assumptions, and are influenced by beliefs about society. One broad category is distributive fairness, which focuses on the outcomes, often identifying groups and seeking to compensate for statistical disparities. Representational fairness tries to ensure that AI systems do not reinforce negative stereotypes or render certain groups invisible. Procedural fairness focuses on the decision process rather than the outcome. The most relevant notions of fairness may depend on the context, notably the type of AI application and the stakeholders. The subjectivity in the notions of bias and fairness makes it difficult for companies to operationalize them. Having access to sensitive attributes such as race or gender is also considered by many AI ethicists to be necessary in order to compensate for biases, but it may conflict with anti-discrimination laws.[204]\n

                        At its 2022 Conference on Fairness, Accountability, and Transparency (ACM FAccT 2022), the Association for Computing Machinery, in Seoul, South Korea, presented and published findings that recommend that until AI and robotics systems are demonstrated to be free of bias mistakes, they are unsafe, and the use of self-learning neural networks trained on vast, unregulated sources of flawed internet data should be curtailed.[dubious – discuss][219]\n

                        \n

                        Lack of transparency

                        \n\n

                        Many AI systems are so complex that their designers cannot explain how they reach their decisions.[220] Particularly with deep neural networks, in which there are a large number of non-linear relationships between inputs and outputs. But some popular explainability techniques exist.[221]\n

                        It is impossible to be certain that a program is operating correctly if no one knows how exactly it works. There have been many cases where a machine learning program passed rigorous tests, but nevertheless learned something different than what the programmers intended. For example, a system that could identify skin diseases better than medical professionals was found to actually have a strong tendency to classify images with a ruler as "cancerous", because pictures of malignancies typically include a ruler to show the scale.[222] Another machine learning system designed to help effectively allocate medical resources was found to classify patients with asthma as being at "low risk" of dying from pneumonia. Having asthma is actually a severe risk factor, but since the patients having asthma would usually get much more medical care, they were relatively unlikely to die according to the training data. The correlation between asthma and low risk of dying from pneumonia was real, but misleading.[223]\n

                        People who have been harmed by an algorithm\'s decision have a right to an explanation.[224] Doctors, for example, are expected to clearly and completely explain to their colleagues the reasoning behind any decision they make. Early drafts of the European Union\'s General Data Protection Regulation in 2016 included an explicit statement that this right exists.[n] Industry experts noted that this is an unsolved problem with no solution in sight. Regulators argued that nevertheless the harm is real: if the problem has no solution, the tools should not be used.[225]\n

                        DARPA established the XAI ("Explainable Artificial Intelligence") program in 2014 to try to solve these problems.[226]\n

                        Several approaches aim to address the transparency problem. SHAP enables to visualise the contribution of each feature to the output.[227] LIME can locally approximate a model\'s outputs with a simpler, interpretable model.[228] Multitask learning provides a large number of outputs in addition to the target classification. These other outputs can help developers deduce what the network has learned.[229] Deconvolution, DeepDream and other generative methods can allow developers to see what different layers of a deep network for computer vision have learned, and produce output that can suggest what the network is learning.[230] For generative pre-trained transformers, Anthropic developed a technique based on dictionary learning that associates patterns of neuron activations with human-understandable concepts.[231]\n

                        \n

                        Bad actors and weaponized AI

                        \n\n

                        Artificial intelligence provides a number of tools that are useful to bad actors, such as authoritarian governments, terrorists, criminals or rogue states.\n

                        A lethal autonomous weapon is a machine that locates, selects and engages human targets without human supervision.[o] Widely available AI tools can be used by bad actors to develop inexpensive autonomous weapons and, if produced at scale, they are potentially weapons of mass destruction.[233] Even when used in conventional warfare, it is unlikely that they will be able to reliably choose targets and could potentially kill an innocent person.[233] In 2014, 30 nations (including China) supported a ban on autonomous weapons under the United Nations\' Convention on Certain Conventional Weapons, however the United States and others disagreed.[234] By 2015, over fifty countries were reported to be researching battlefield robots.[235]\n

                        AI tools make it easier for authoritarian governments to efficiently control their citizens in several ways. Face and voice recognition allow widespread surveillance. Machine learning, operating this data, can classify potential enemies of the state and prevent them from hiding. Recommendation systems can precisely target propaganda and misinformation for maximum effect. Deepfakes and generative AI aid in producing misinformation. Advanced AI can make authoritarian centralized decision making more competitive than liberal and decentralized systems such as markets. It lowers the cost and difficulty of digital warfare and advanced spyware.[236] All these technologies have been available since 2020 or earlier—AI facial recognition systems are already being used for mass surveillance in China.[237][238]\n

                        There are many other ways that AI is expected to help bad actors, some of which cannot be foreseen. For example, machine-learning AI is able to design tens of thousands of toxic molecules in a matter of hours.[239]\n

                        \n

                        Technological unemployment

                        \n\n

                        Economists have frequently highlighted the risks of redundancies from AI, and speculated about unemployment if there is no adequate social policy for full employment.[240]\n

                        In the past, technology has tended to increase rather than reduce total employment, but economists acknowledge that "we\'re in uncharted territory" with AI.[241] A survey of economists showed disagreement about whether the increasing use of robots and AI will cause a substantial increase in long-term unemployment, but they generally agree that it could be a net benefit if productivity gains are redistributed.[242] Risk estimates vary; for example, in the 2010s, Michael Osborne and Carl Benedikt Frey estimated 47% of U.S. jobs are at "high risk" of potential automation, while an OECD report classified only 9% of U.S. jobs as "high risk".[p][244] The methodology of speculating about future employment levels has been criticised as lacking evidential foundation, and for implying that technology, rather than social policy, creates unemployment, as opposed to redundancies.[240] In April 2023, it was reported that 70% of the jobs for Chinese video game illustrators had been eliminated by generative artificial intelligence.[245][246]\n

                        Unlike previous waves of automation, many middle-class jobs may be eliminated by artificial intelligence; The Economist stated in 2015 that "the worry that AI could do to white-collar jobs what steam power did to blue-collar ones during the Industrial Revolution" is "worth taking seriously".[247] Jobs at extreme risk range from paralegals to fast food cooks, while job demand is likely to increase for care-related professions ranging from personal healthcare to the clergy.[248]\n

                        From the early days of the development of artificial intelligence, there have been arguments, for example, those put forward by Joseph Weizenbaum, about whether tasks that can be done by computers actually should be done by them, given the difference between computers and humans, and between quantitative calculation and qualitative, value-based judgement.[249]\n

                        \n

                        Existential risk

                        \n\n

                        It has been argued AI will become so powerful that humanity may irreversibly lose control of it. This could, as physicist Stephen Hawking stated, "spell the end of the human race".[250] This scenario has been common in science fiction, when a computer or robot suddenly develops a human-like "self-awareness" (or "sentience" or "consciousness") and becomes a malevolent character.[q] These sci-fi scenarios are misleading in several ways.\n

                        First, AI does not require human-like "sentience" to be an existential risk. Modern AI programs are given specific goals and use learning and intelligence to achieve them. Philosopher Nick Bostrom argued that if one gives almost any goal to a sufficiently powerful AI, it may choose to destroy humanity to achieve it (he used the example of a paperclip factory manager).[252] Stuart Russell gives the example of a household robot that tries to find a way to kill its owner to prevent it from being unplugged, reasoning that "you can\'t fetch the coffee if you\'re dead."[253] In order to be safe for humanity, a superintelligence would have to be genuinely aligned with humanity\'s morality and values so that it is "fundamentally on our side".[254]\n

                        Second, Yuval Noah Harari argues that AI does not require a robot body or physical control to pose an existential risk. The essential parts of civilization are not physical. Things like ideologies, law, government, money and the economy are made of language; they exist because there are stories that billions of people believe. The current prevalence of misinformation suggests that an AI could use language to convince people to believe anything, even to take actions that are destructive.[255]\n

                        The opinions amongst experts and industry insiders are mixed, with sizable fractions both concerned and unconcerned by risk from eventual superintelligent AI.[256] Personalities such as Stephen Hawking, Bill Gates, and Elon Musk,[257] as well as AI pioneers such as Yoshua Bengio, Stuart Russell, Demis Hassabis, and Sam Altman, have expressed concerns about existential risk from AI.\n

                        In May 2023, Geoffrey Hinton announced his resignation from Google in order to be able to "freely speak out about the risks of AI" without "considering how this impacts Google."[258] He notably mentioned risks of an AI takeover,[259] and stressed that in order to avoid the worst outcomes, establishing safety guidelines will require cooperation among those competing in use of AI.[260]\n

                        In 2023, many leading AI experts issued the joint statement that "Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war".[261]\n

                        Other researchers, however, spoke in favor of a less dystopian view. AI pioneer Juergen Schmidhuber did not sign the joint statement, emphasising that in 95% of all cases, AI research is about making "human lives longer and healthier and easier."[262] While the tools that are now being used to improve lives can also be used by bad actors, "they can also be used against the bad actors."[263][264] Andrew Ng also argued that "it\'s a mistake to fall for the doomsday hype on AI—and that regulators who do will only benefit vested interests."[265] Yann LeCun "scoffs at his peers\' dystopian scenarios of supercharged misinformation and even, eventually, human extinction."[266] In the early 2010s, experts argued that the risks are too distant in the future to warrant research or that humans will be valuable from the perspective of a superintelligent machine.[267] However, after 2016, the study of current and future risks and possible solutions became a serious area of research.[268]\n

                        \n

                        Ethical machines and alignment

                        \n\n

                        Friendly AI are machines that have been designed from the beginning to minimize risks and to make choices that benefit humans. Eliezer Yudkowsky, who coined the term, argues that developing friendly AI should be a higher research priority: it may require a large investment and it must be completed before AI becomes an existential risk.[269]\n

                        Machines with intelligence have the potential to use their intelligence to make ethical decisions. The field of machine ethics provides machines with ethical principles and procedures for resolving ethical dilemmas.[270]\nThe field of machine ethics is also called computational morality,[270]\nand was founded at an AAAI symposium in 2005.[271]\n

                        Other approaches include Wendell Wallach\'s "artificial moral agents"[272] and Stuart J. Russell\'s three principles for developing provably beneficial machines.[273]\n

                        \n

                        Open source

                        \n

                        Active organizations in the AI open-source community include Hugging Face,[274] Google,[275] EleutherAI and Meta.[276] Various AI models, such as Llama 2, Mistral or Stable Diffusion, have been made open-weight,[277][278] meaning that their architecture and trained parameters (the "weights") are publicly available. Open-weight models can be freely fine-tuned, which allows companies to specialize them with their own data and for their own use-case.[279] Open-weight models are useful for research and innovation but can also be misused. Since they can be fine-tuned, any built-in security measure, such as objecting to harmful requests, can be trained away until it becomes ineffective. Some researchers warn that future AI models may develop dangerous capabilities (such as the potential to drastically facilitate bioterrorism) and that once released on the Internet, they cannot be deleted everywhere if needed. They recommend pre-release audits and cost-benefit analyses.[280]\n

                        \n

                        Frameworks

                        \n

                        Artificial Intelligence projects can have their ethical permissibility tested while designing, developing, and implementing an AI system. An AI framework such as the Care and Act Framework, containing the SUM values—developed by the Alan Turing Institute—tests projects in four main areas:[281][282]\n

                        \n
                        • Respect the dignity of individual people
                        • \n
                        • Connect with other people sincerely, openly, and inclusively
                        • \n
                        • Care for the wellbeing of everyone
                        • \n
                        • Protect social values, justice, and the public interest
                        \n

                        Other developments in ethical frameworks include those decided upon during the Asilomar Conference, the Montreal Declaration for Responsible AI, and the IEEE\'s Ethics of Autonomous Systems initiative, among others;[283] however, these principles do not go without their criticisms, especially with regard to the people chosen to contribute to these frameworks.[284]\n

                        Promotion of the wellbeing of the people and communities that these technologies affect requires consideration of the social and ethical implications at all stages of AI system design, development and implementation, and collaboration between job roles such as data scientists, product managers, data engineers, domain experts, and delivery managers.[285]\n

                        The UK AI Safety Institute released in 2024 a testing toolset called \'Inspect\' for AI safety evaluations available under a MIT open-source licence which is freely available on GitHub and can be improved with third-party packages. It can be used to evaluate AI models in a range of areas including core knowledge, ability to reason, and autonomous capabilities.[286]\n

                        \n

                        Regulation

                        \n\n
                        AI Safety Summit
                        The first global AI Safety Summit was held in 2023 with a declaration calling for international co-operation.
                        \n

                        The regulation of artificial intelligence is the development of public sector policies and laws for promoting and regulating AI; it is therefore related to the broader regulation of algorithms.[287] The regulatory and policy landscape for AI is an emerging issue in jurisdictions globally.[288] According to AI Index at Stanford, the annual number of AI-related laws passed in the 127 survey countries jumped from one passed in 2016 to 37 passed in 2022 alone.[289][290] Between 2016 and 2020, more than 30 countries adopted dedicated strategies for AI.[291] Most EU member states had released national AI strategies, as had Canada, China, India, Japan, Mauritius, the Russian Federation, Saudi Arabia, United Arab Emirates, U.S., and Vietnam. Others were in the process of elaborating their own AI strategy, including Bangladesh, Malaysia and Tunisia.[291] The Global Partnership on Artificial Intelligence was launched in June 2020, stating a need for AI to be developed in accordance with human rights and democratic values, to ensure public confidence and trust in the technology.[291] Henry Kissinger, Eric Schmidt, and Daniel Huttenlocher published a joint statement in November 2021 calling for a government commission to regulate AI.[292] In 2023, OpenAI leaders published recommendations for the governance of superintelligence, which they believe may happen in less than 10 years.[293] In 2023, the United Nations also launched an advisory body to provide recommendations on AI governance; the body comprises technology company executives, government officials and academics.[294] In 2024, the Council of Europe created the first international legally binding treaty on AI, called the "Framework Convention on Artificial Intelligence and Human Rights, Democracy and the Rule of Law". It was adopted by the European Union, the United States, the United Kingdom, and other signatories.[295]\n

                        In a 2022 Ipsos survey, attitudes towards AI varied greatly by country; 78% of Chinese citizens, but only 35% of Americans, agreed that "products and services using AI have more benefits than drawbacks".[289] A 2023 Reuters/Ipsos poll found that 61% of Americans agree, and 22% disagree, that AI poses risks to humanity.[296] In a 2023 Fox News poll, 35% of Americans thought it "very important", and an additional 41% thought it "somewhat important", for the federal government to regulate AI, versus 13% responding "not very important" and 8% responding "not at all important".[297][298]\n

                        In November 2023, the first global AI Safety Summit was held in Bletchley Park in the UK to discuss the near and far term risks of AI and the possibility of mandatory and voluntary regulatory frameworks.[299] 28 countries including the United States, China, and the European Union issued a declaration at the start of the summit, calling for international co-operation to manage the challenges and risks of artificial intelligence.[300][301] In May 2024 at the AI Seoul Summit, 16 global AI tech companies agreed to safety commitments on the development of AI.[302][303]\n

                        \n

                        History

                        \n\n\n

                        The study of mechanical or "formal" reasoning began with philosophers and mathematicians in antiquity. The study of logic led directly to Alan Turing\'s theory of computation, which suggested that a machine, by shuffling symbols as simple as "0" and "1", could simulate any conceivable form of mathematical reasoning.[304][305] This, along with concurrent discoveries in cybernetics, information theory and neurobiology, led researchers to consider the possibility of building an "electronic brain".[r] They developed several areas of research that would become part of AI,[307] such as McCulloch and Pitts\' design for "artificial neurons" in 1943,[115] and Turing\'s influential 1950 paper \'Computing Machinery and Intelligence\', which introduced the Turing test and showed that "machine intelligence" was plausible.[308][305]\n

                        The field of AI research was founded at a workshop at Dartmouth College in 1956.[s][6] The attendees became the leaders of AI research in the 1960s.[t] They and their students produced programs that the press described as "astonishing":[u] computers were learning checkers strategies, solving word problems in algebra, proving logical theorems and speaking English.[v][7] Artificial intelligence laboratories were set up at a number of British and U.S. universities in the latter 1950s and early 1960s.[305]\n

                        Researchers in the 1960s and the 1970s were convinced that their methods would eventually succeed in creating a machine with general intelligence and considered this the goal of their field.[312] In 1965 Herbert Simon predicted, "machines will be capable, within twenty years, of doing any work a man can do".[313] In 1967 Marvin Minsky agreed, writing that "within a generation ... the problem of creating \'artificial intelligence\' will substantially be solved".[314] They had, however, underestimated the difficulty of the problem.[w] In 1974, both the U.S. and British governments cut off exploratory research in response to the criticism of Sir James Lighthill[316] and ongoing pressure from the U.S. Congress to fund more productive projects.[317] Minsky\'s and Papert\'s book Perceptrons was understood as proving that artificial neural networks would never be useful for solving real-world tasks, thus discrediting the approach altogether.[318] The "AI winter", a period when obtaining funding for AI projects was difficult, followed.[9]\n

                        In the early 1980s, AI research was revived by the commercial success of expert systems,[319] a form of AI program that simulated the knowledge and analytical skills of human experts. By 1985, the market for AI had reached over a billion dollars. At the same time, Japan\'s fifth generation computer project inspired the U.S. and British governments to restore funding for academic research.[8] However, beginning with the collapse of the Lisp Machine market in 1987, AI once again fell into disrepute, and a second, longer-lasting winter began.[10]\n

                        Up to this point, most of AI\'s funding had gone to projects that used high-level symbols to represent mental objects like plans, goals, beliefs, and known facts. In the 1980s, some researchers began to doubt that this approach would be able to imitate all the processes of human cognition, especially perception, robotics, learning and pattern recognition,[320] and began to look into "sub-symbolic" approaches.[321] Rodney Brooks rejected "representation" in general and focussed directly on engineering machines that move and survive.[x] Judea Pearl, Lofti Zadeh and others developed methods that handled incomplete and uncertain information by making reasonable guesses rather than precise logic.[86][326] But the most important development was the revival of "connectionism", including neural network research, by Geoffrey Hinton and others.[327] In 1990, Yann LeCun successfully showed that convolutional neural networks can recognize handwritten digits, the first of many successful applications of neural networks.[328]\n

                        AI gradually restored its reputation in the late 1990s and early 21st century by exploiting formal mathematical methods and by finding specific solutions to specific problems. This "narrow" and "formal" focus allowed researchers to produce verifiable results and collaborate with other fields (such as statistics, economics and mathematics).[329] By 2000, solutions developed by AI researchers were being widely used, although in the 1990s they were rarely described as "artificial intelligence" (a tendency known as the AI effect).[330]\nHowever, several academic researchers became concerned that AI was no longer pursuing its original goal of creating versatile, fully intelligent machines. Beginning around 2002, they founded the subfield of artificial general intelligence (or "AGI"), which had several well-funded institutions by the 2010s.[4]\n

                        Deep learning began to dominate industry benchmarks in 2012 and was adopted throughout the field.[11]\nFor many specific tasks, other methods were abandoned.[y]\nDeep learning\'s success was based on both hardware improvements (faster computers,[332] graphics processing units, cloud computing[333]) and access to large amounts of data[334] (including curated datasets,[333] such as ImageNet). Deep learning\'s success led to an enormous increase in interest and funding in AI.[z] The amount of machine learning research (measured by total publications) increased by 50% in the years 2015–2019.[291]\n

                        In 2016, issues of fairness and the misuse of technology were catapulted into center stage at machine learning conferences, publications vastly increased, funding became available, and many researchers re-focussed their careers on these issues. The alignment problem became a serious field of academic study.[268]\n

                        In the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text.[335] These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in "AI" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in "AI".[336] About 800,000 "AI"-related U.S. job openings existed in 2022.[337]\n

                        \n

                        Philosophy

                        \n

                        Philosophical debates have historically sought to determine the nature of intelligence and how to make intelligent machines.[338] Another major focus has been whether machines can be conscious, and the associated ethical implications.[339] Many other topics in philosophy can be relevant to AI, such as epistemology and free will.[340] Rapid advancements have intensified public discussions on the philosophy and ethics of AI.[339]\n

                        Defining artificial intelligence

                        \n\n

                        Alan Turing wrote in 1950 "I propose to consider the question \'can machines think\'?"[341] He advised changing the question from whether a machine "thinks", to "whether or not it is possible for machinery to show intelligent behaviour".[341] He devised the Turing test, which measures the ability of a machine to simulate human conversation.[308] Since we can only observe the behavior of the machine, it does not matter if it is "actually" thinking or literally has a "mind". Turing notes that we can not determine these things about other people but "it is usual to have a polite convention that everyone thinks."[342]\n

                        \n
                        The Turing test can provide some evidence of intelligence, but it penalizes non-human intelligent behavior.[343]
                        \n

                        Russell and Norvig agree with Turing that intelligence must be defined in terms of external behavior, not internal structure.[1] However, they are critical that the test requires the machine to imitate humans. "Aeronautical engineering texts," they wrote, "do not define the goal of their field as making \'machines that fly so exactly like pigeons that they can fool other pigeons.\'"[344] AI founder John McCarthy agreed, writing that "Artificial intelligence is not, by definition, simulation of human intelligence".[345]\n

                        McCarthy defines intelligence as "the computational part of the ability to achieve goals in the world".[346] Another AI founder, Marvin Minsky similarly describes it as "the ability to solve hard problems".[347] The leading AI textbook defines it as the study of agents that perceive their environment and take actions that maximize their chances of achieving defined goals.[1] These definitions view intelligence in terms of well-defined problems with well-defined solutions, where both the difficulty of the problem and the performance of the program are direct measures of the "intelligence" of the machine—and no other philosophical discussion is required, or may not even be possible.\n

                        Another definition has been adopted by Google,[348] a major practitioner in the field of AI. This definition stipulates the ability of systems to synthesize information as the manifestation of intelligence, similar to the way it is defined in biological intelligence.\n

                        Some authors have suggested that, in practice, the definition of AI is vague and difficult to define, with contention as to whether classical algorithms should be categorised as AI,[349] with many companies during the early 2020s AI boom using the term as a marketing buzzword, often even if they did "not actually use AI in a material way".[350]\n

                        \n

                        Evaluating approaches to AI

                        \n

                        No established unifying theory or paradigm has guided AI research for most of its history.[aa] The unprecedented success of statistical machine learning in the 2010s eclipsed all other approaches (so much so that some sources, especially in the business world, use the term "artificial intelligence" to mean "machine learning with neural networks"). This approach is mostly sub-symbolic, soft and narrow. Critics argue that these questions may have to be revisited by future generations of AI researchers.\n

                        \n

                        Symbolic AI and its limits

                        \n

                        Symbolic AI (or "GOFAI")[352] simulated the high-level conscious reasoning that people use when they solve puzzles, express legal reasoning and do mathematics. They were highly successful at "intelligent" tasks such as algebra or IQ tests. In the 1960s, Newell and Simon proposed the physical symbol systems hypothesis: "A physical symbol system has the necessary and sufficient means of general intelligent action."[353]\n

                        However, the symbolic approach failed on many tasks that humans solve easily, such as learning, recognizing an object or commonsense reasoning. Moravec\'s paradox is the discovery that high-level "intelligent" tasks were easy for AI, but low level "instinctive" tasks were extremely difficult.[354] Philosopher Hubert Dreyfus had argued since the 1960s that human expertise depends on unconscious instinct rather than conscious symbol manipulation, and on having a "feel" for the situation, rather than explicit symbolic knowledge.[355] Although his arguments had been ridiculed and ignored when they were first presented, eventually, AI research came to agree with him.[ab][16]\n

                        The issue is not resolved: sub-symbolic reasoning can make many of the same inscrutable mistakes that human intuition does, such as algorithmic bias. Critics such as Noam Chomsky argue continuing research into symbolic AI will still be necessary to attain general intelligence,[357][358] in part because sub-symbolic AI is a move away from explainable AI: it can be difficult or impossible to understand why a modern statistical AI program made a particular decision. The emerging field of neuro-symbolic artificial intelligence attempts to bridge the two approaches.\n

                        \n

                        Neat vs. scruffy

                        \n\n

                        "Neats" hope that intelligent behavior is described using simple, elegant principles (such as logic, optimization, or neural networks). "Scruffies" expect that it necessarily requires solving a large number of unrelated problems. Neats defend their programs with theoretical rigor, scruffies rely mainly on incremental testing to see if they work. This issue was actively discussed in the 1970s and 1980s,[359] but eventually was seen as irrelevant. Modern AI has elements of both.\n

                        \n

                        Soft vs. hard computing

                        \n\n

                        Finding a provably correct or optimal solution is intractable for many important problems.[15] Soft computing is a set of techniques, including genetic algorithms, fuzzy logic and neural networks, that are tolerant of imprecision, uncertainty, partial truth and approximation. Soft computing was introduced in the late 1980s and most successful AI programs in the 21st century are examples of soft computing with neural networks.\n

                        \n

                        Narrow vs. general AI

                        \n\n

                        AI researchers are divided as to whether to pursue the goals of artificial general intelligence and superintelligence directly or to solve as many specific problems as possible (narrow AI) in hopes these solutions will lead indirectly to the field\'s long-term goals.[360][361] General intelligence is difficult to define and difficult to measure, and modern AI has had more verifiable successes by focusing on specific problems with specific solutions. The sub-field of artificial general intelligence studies this area exclusively.\n

                        \n

                        Machine consciousness, sentience, and mind

                        \n\n

                        The philosophy of mind does not know whether a machine can have a mind, consciousness and mental states, in the same sense that human beings do. This issue considers the internal experiences of the machine, rather than its external behavior. Mainstream AI research considers this issue irrelevant because it does not affect the goals of the field: to build machines that can solve problems using intelligence. Russell and Norvig add that "[t]he additional project of making a machine conscious in exactly the way humans are is not one that we are equipped to take on."[362] However, the question has become central to the philosophy of mind. It is also typically the central question at issue in artificial intelligence in fiction.\n

                        \n

                        Consciousness

                        \n\n

                        David Chalmers identified two problems in understanding the mind, which he named the "hard" and "easy" problems of consciousness.[363] The easy problem is understanding how the brain processes signals, makes plans and controls behavior. The hard problem is explaining how this feels or why it should feel like anything at all, assuming we are right in thinking that it truly does feel like something (Dennett\'s consciousness illusionism says this is an illusion). While human information processing is easy to explain, human subjective experience is difficult to explain. For example, it is easy to imagine a color-blind person who has learned to identify which objects in their field of view are red, but it is not clear what would be required for the person to know what red looks like.[364]\n

                        \n

                        Computationalism and functionalism

                        \n\n

                        Computationalism is the position in the philosophy of mind that the human mind is an information processing system and that thinking is a form of computing. Computationalism argues that the relationship between mind and body is similar or identical to the relationship between software and hardware and thus may be a solution to the mind–body problem. This philosophical position was inspired by the work of AI researchers and cognitive scientists in the 1960s and was originally proposed by philosophers Jerry Fodor and Hilary Putnam.[365]\n

                        Philosopher John Searle characterized this position as "strong AI": "The appropriately programmed computer with the right inputs and outputs would thereby have a mind in exactly the same sense human beings have minds."[ac] Searle counters this assertion with his Chinese room argument, which attempts to show that, even if a machine perfectly simulates human behavior, there is still no reason to suppose it also has a mind.[369]\n

                        \n

                        AI welfare and rights

                        \n

                        It is difficult or impossible to reliably evaluate whether an advanced AI is sentient (has the ability to feel), and if so, to what degree.[370] But if there is a significant chance that a given machine can feel and suffer, then it may be entitled to certain rights or welfare protection measures, similarly to animals.[371][372] Sapience (a set of capacities related to high intelligence, such as discernment or self-awareness) may provide another moral basis for AI rights.[371] Robot rights are also sometimes proposed as a practical way to integrate autonomous agents into society.[373]\n

                        In 2017, the European Union considered granting "electronic personhood" to some of the most capable AI systems. Similarly to the legal status of companies, it would have conferred rights but also responsibilities.[374] Critics argued in 2018 that granting rights to AI systems would downplay the importance of human rights, and that legislation should focus on user needs rather than speculative futuristic scenarios. They also noted that robots lacked the autonomy to take part in society on their own.[375][376]\n

                        Progress in AI increased interest in the topic. Proponents of AI welfare and rights often argue that AI sentience, if it emerges, would be particularly easy to deny. They warn that this may be a moral blind spot analogous to slavery or factory farming, which could lead to large-scale suffering if sentient AI is created and carelessly exploited.[372][371]\n

                        \n

                        Future

                        \n

                        Superintelligence and the singularity

                        \n

                        A superintelligence is a hypothetical agent that would possess intelligence far surpassing that of the brightest and most gifted human mind.[361] If research into artificial general intelligence produced sufficiently intelligent software, it might be able to reprogram and improve itself. The improved software would be even better at improving itself, leading to what I. J. Good called an "intelligence explosion" and Vernor Vinge called a "singularity".[377]\n

                        However, technologies cannot improve exponentially indefinitely, and typically follow an S-shaped curve, slowing when they reach the physical limits of what the technology can do.[378]\n

                        \n

                        Transhumanism

                        \n\n

                        Robot designer Hans Moravec, cyberneticist Kevin Warwick and inventor Ray Kurzweil have predicted that humans and machines may merge in the future into cyborgs that are more capable and powerful than either. This idea, called transhumanism, has roots in the writings of Aldous Huxley and Robert Ettinger.[379]\n

                        Edward Fredkin argues that "artificial intelligence is the next step in evolution", an idea first proposed by Samuel Butler\'s "Darwin among the Machines" as far back as 1863, and expanded upon by George Dyson in his 1998 book Darwin Among the Machines: The Evolution of Global Intelligence.[380]\n

                        \n

                        In fiction

                        \n\n
                        The word "robot" itself was coined by Karel Čapek in his 1921 play R.U.R., the title standing for "Rossum\'s Universal Robots".
                        \n

                        Thought-capable artificial beings have appeared as storytelling devices since antiquity,[381] and have been a persistent theme in science fiction.[382]\n

                        A common trope in these works began with Mary Shelley\'s Frankenstein, where a human creation becomes a threat to its masters. This includes such works as Arthur C. Clarke\'s and Stanley Kubrick\'s 2001: A Space Odyssey (both 1968), with HAL 9000, the murderous computer in charge of the Discovery One spaceship, as well as The Terminator (1984) and The Matrix (1999). In contrast, the rare loyal robots such as Gort from The Day the Earth Stood Still (1951) and Bishop from Aliens (1986) are less prominent in popular culture.[383]\n

                        Isaac Asimov introduced the Three Laws of Robotics in many stories, most notably with the "Multivac" super-intelligent computer. Asimov\'s laws are often brought up during lay discussions of machine ethics;[384] while almost all artificial intelligence researchers are familiar with Asimov\'s laws through popular culture, they generally consider the laws useless for many reasons, one of which is their ambiguity.[385]\n

                        Several works use AI to force us to confront the fundamental question of what makes us human, showing us artificial beings that have the ability to feel, and thus to suffer. This appears in Karel Čapek\'s R.U.R., the films A.I. Artificial Intelligence and Ex Machina, as well as the novel Do Androids Dream of Electric Sheep?, by Philip K. Dick. Dick considers the idea that our understanding of human subjectivity is altered by technology created with artificial intelligence.[386]\n

                        \n

                        See also

                        \n\n

                        Explanatory notes

                        \n
                        \n
                          \n
                        1. ^ a b This list of intelligent traits is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                        2. \n
                        3. ^ a b This list of tools is based on the topics covered by the major AI textbooks, including: Russell & Norvig (2021), Luger & Stubblefield (2004), Poole, Mackworth & Goebel (1998) and Nilsson (1998)\n
                        4. \n
                        5. ^ It is among the reasons that expert systems proved to be inefficient for capturing knowledge.[30][31]\n
                        6. \n
                        7. ^ \n"Rational agent" is a general term used in economics, philosophy and theoretical artificial intelligence. It can refer to anything that directs its behavior to accomplish goals, such as a person, an animal, a corporation, a nation, or in the case of AI, a computer program.\n
                        8. \n
                        9. ^ Alan Turing discussed the centrality of learning as early as 1950, in his classic paper "Computing Machinery and Intelligence".[42] In 1956, at the original Dartmouth AI summer conference, Ray Solomonoff wrote a report on unsupervised probabilistic machine learning: "An Inductive Inference Machine".[43]\n
                        10. \n
                        11. ^ See AI winter § Machine translation and the ALPAC report of 1966\n
                        12. \n
                        13. ^ \nCompared with symbolic logic, formal Bayesian inference is computationally expensive. For inference to be tractable, most observations must be conditionally independent of one another. AdSense uses a Bayesian network with over 300 million edges to learn which ads to serve.[93]\n
                        14. \n
                        15. ^ Expectation–maximization, one of the most popular algorithms in machine learning, allows clustering in the presence of unknown latent variables.[95]\n
                        16. \n
                        17. ^ \nSome form of deep neural networks (without a specific learning algorithm) were described by:\nWarren S. McCulloch and Walter Pitts (1943)[115]\nAlan Turing (1948);[116]\nKarl Steinbuch and Roger David Joseph (1961).[117]\nDeep or recurrent networks that learned (or used gradient descent) were developed by:\nFrank Rosenblatt (1957);[116]\nOliver Selfridge (1959);[117]\nAlexey Ivakhnenko and Valentin Lapa (1965);[118]\nKaoru Nakano (1971);[119]\nShun-Ichi Amari (1972);[119]\nJohn Joseph Hopfield (1982).[119]\nPrecursors to backpropagation were developed by:\nHenry J. Kelley (1960);[116]\nArthur E. Bryson (1962);[116]\nStuart Dreyfus (1962);[116]\nArthur E. Bryson and Yu-Chi Ho (1969);[116]\nBackpropagation was independently developed by:\nSeppo Linnainmaa (1970);[120]\nPaul Werbos (1974).[116]\n
                        18. \n
                        19. ^ Geoffrey Hinton said, of his work on neural networks in the 1990s, "our labeled datasets were thousands of times too small. [And] our computers were millions of times too slow."[121]\n
                        20. \n
                        21. ^ In statistics, a bias is a systematic error or deviation from the correct value. But in the context of fairness, it refers to a tendency in favor or against a certain group or individual characteristic, usually in a way that is considered unfair or harmful. A statistically unbiased AI system that produces disparate outcomes for different demographic groups may thus be viewed as biased in the ethical sense.[204]\n
                        22. \n
                        23. ^ Including Jon Kleinberg (Cornell University), Sendhil Mullainathan (University of Chicago), Cynthia Chouldechova (Carnegie Mellon) and Sam Corbett-Davis (Stanford)[213]\n
                        24. \n
                        25. ^ Moritz Hardt (a director at the Max Planck Institute for Intelligent Systems) argues that machine learning "is fundamentally the wrong tool for a lot of domains, where you\'re trying to design interventions and mechanisms that change the world."[218]\n
                        26. \n
                        27. ^ When the law was passed in 2018, it still contained a form of this provision.\n
                        28. \n
                        29. ^ This is the United Nations\' definition, and includes things like land mines as well.[232]\n
                        30. \n
                        31. ^ See table 4; 9% is both the OECD average and the U.S. average.[243]\n
                        32. \n
                        33. ^ Sometimes called a "robopocalypse"[251]\n
                        34. \n
                        35. ^ "Electronic brain" was the term used by the press around this time.[304][306]\n
                        36. \n
                        37. ^ \nDaniel Crevier wrote, "the conference is generally recognized as the official birthdate of the new science."[309] Russell and Norvig called the conference "the inception of artificial intelligence."[115]\n
                        38. \n
                        39. ^ \nRussell and Norvig wrote "for the next 20 years the field would be dominated by these people and their students."[310]\n
                        40. \n
                        41. ^ \nRussell and Norvig wrote "it was astonishing whenever a computer did anything kind of smartish".[311]\n
                        42. \n
                        43. ^ \nThe programs described are Arthur Samuel\'s checkers program for the IBM 701, Daniel Bobrow\'s STUDENT, Newell and Simon\'s Logic Theorist and Terry Winograd\'s SHRDLU.\n
                        44. \n
                        45. ^ Russell and Norvig write: "in almost all cases, these early systems failed on more difficult problems"[315]\n
                        46. \n
                        47. ^ \nEmbodied approaches to AI[322] were championed by Hans Moravec[323] and Rodney Brooks[324] and went by many names: Nouvelle AI.[324] Developmental robotics.[325]\n
                        48. \n
                        49. ^ Matteo Wong wrote in The Atlantic: "Whereas for decades, computer-science fields such as natural-language processing, computer vision, and robotics used extremely different methods, now they all use a programming method called "deep learning." As a result, their code and approaches have become more similar, and their models are easier to integrate into one another."[331]\n
                        50. \n
                        51. ^ Jack Clark wrote in Bloomberg: "After a half-decade of quiet breakthroughs in artificial intelligence, 2015 has been a landmark year. Computers are smarter and learning faster than ever", and noted that the number of software projects that use machine learning at Google increased from a "sporadic usage" in 2012 to more than 2,700 projects in 2015.[333]\n
                        52. \n
                        53. ^ Nils Nilsson wrote in 1983: "Simply put, there is wide disagreement in the field about what AI is all about."[351]\n
                        54. \n
                        55. ^ \nDaniel Crevier wrote that "time has proven the accuracy and perceptiveness of some of Dreyfus\'s comments. Had he formulated them less aggressively, constructive actions they suggested might have been taken much earlier."[356]\n
                        56. \n
                        57. ^ \nSearle presented this definition of "Strong AI" in 1999.[366] Searle\'s original formulation was "The appropriately programmed computer really is a mind, in the sense that computers given the right programs can be literally said to understand and have other cognitive states."[367] Strong AI is defined similarly by Russell and Norvig: "Strong AI – the assertion that machines that do so are actually thinking (as opposed to simulating thinking)."[368]\n
                        58. \n
                        \n

                        References

                        \n
                        \n
                          \n
                        1. ^ a b c Russell & Norvig (2021), pp. 1–4.\n
                        2. \n
                        3. ^ AI set to exceed human brain power Archived 2008-02-19 at the Wayback Machine CNN.com (July 26, 2006)\n
                        4. \n
                        5. ^ Kaplan, Andreas; Haenlein, Michael (2019). "Siri, Siri, in my hand: Who\'s the fairest in the land? On the interpretations, illustrations, and implications of artificial intelligence". Business Horizons. 62: 15–25. doi:10.1016/j.bushor.2018.08.004. ISSN 0007-6813. S2CID 158433736.\n
                        6. \n
                        7. ^ a b c \nArtificial general intelligence: Russell & Norvig (2021, pp. 32–33, 1020–1021)
                          Proposal for the modern version: Pennachin & Goertzel (2007)
                          Warnings of overspecialization in AI from leading researchers: Nilsson (1995), McCarthy (2007), Beal & Winston (2009)
                          \n
                        8. \n
                        9. ^ Russell & Norvig (2021, §1.2).\n
                        10. \n
                        11. ^ a b Dartmouth workshop: Russell & Norvig (2021, p. 18), McCorduck (2004, pp. 111–136), NRC (1999, pp. 200–201)
                          The proposal: McCarthy et al. (1955)
                          \n
                        12. \n
                        13. ^ a b Successful programs the 1960s: McCorduck (2004, pp. 243–252), Crevier (1993, pp. 52–107), Moravec (1988, p. 9), Russell & Norvig (2021, pp. 19–21)\n
                        14. \n
                        15. ^ a b Funding initiatives in the early 1980s: Fifth Generation Project (Japan), Alvey (UK), Microelectronics and Computer Technology Corporation (US), Strategic Computing Initiative (US): McCorduck (2004, pp. 426–441), Crevier (1993, pp. 161–162, 197–203, 211, 240), Russell & Norvig (2021, p. 23), NRC (1999, pp. 210–211), Newquist (1994, pp. 235–248)\n
                        16. \n
                        17. ^ a b First AI Winter, Lighthill report, Mansfield Amendment: Crevier (1993, pp. 115–117), Russell & Norvig (2021, pp. 21–22), NRC (1999, pp. 212–213), Howe (1994), Newquist (1994, pp. 189–201)\n
                        18. \n
                        19. ^ a b Second AI Winter: Russell & Norvig (2021, p. 24), McCorduck (2004, pp. 430–435), Crevier (1993, pp. 209–210), NRC (1999, pp. 214–216), Newquist (1994, pp. 301–318)\n
                        20. \n
                        21. ^ a b Deep learning revolution, AlexNet: Goldman (2022), Russell & Norvig (2021, p. 26), McKinsey (2018)\n
                        22. \n
                        23. ^ Toews (2023).\n
                        24. \n
                        25. ^ Problem-solving, puzzle solving, game playing, and deduction: Russell & Norvig (2021, chpt. 3–5), Russell & Norvig (2021, chpt. 6) (constraint satisfaction), Poole, Mackworth & Goebel (1998, chpt. 2, 3, 7, 9), Luger & Stubblefield (2004, chpt. 3, 4, 6, 8), Nilsson (1998, chpt. 7–12)\n
                        26. \n
                        27. ^ Uncertain reasoning: Russell & Norvig (2021, chpt. 12–18), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 333–381), Nilsson (1998, chpt. 7–12)\n
                        28. \n
                        29. ^ a b c Intractability and efficiency and the combinatorial explosion: Russell & Norvig (2021, p. 21)\n
                        30. \n
                        31. ^ a b c Psychological evidence of the prevalence of sub-symbolic reasoning and knowledge: Kahneman (2011), Dreyfus & Dreyfus (1986), Wason & Shapiro (1966), Kahneman, Slovic & Tversky (1982)\n
                        32. \n
                        33. ^ Knowledge representation and knowledge engineering: Russell & Norvig (2021, chpt. 10), Poole, Mackworth & Goebel (1998, pp. 23–46, 69–81, 169–233, 235–277, 281–298, 319–345), Luger & Stubblefield (2004, pp. 227–243), Nilsson (1998, chpt. 17.1–17.4, 18)\n
                        34. \n
                        35. ^ Smoliar & Zhang (1994).\n
                        36. \n
                        37. ^ Neumann & Möller (2008).\n
                        38. \n
                        39. ^ Kuperman, Reichley & Bailey (2006).\n
                        40. \n
                        41. ^ McGarry (2005).\n
                        42. \n
                        43. ^ Bertini, Del Bimbo & Torniai (2006).\n
                        44. \n
                        45. ^ Russell & Norvig (2021), pp. 272.\n
                        46. \n
                        47. ^ Representing categories and relations: Semantic networks, description logics, inheritance (including frames, and scripts): Russell & Norvig (2021, §10.2 & 10.5), Poole, Mackworth & Goebel (1998, pp. 174–177), Luger & Stubblefield (2004, pp. 248–258), Nilsson (1998, chpt. 18.3)\n
                        48. \n
                        49. ^ Representing events and time: Situation calculus, event calculus, fluent calculus (including solving the frame problem): Russell & Norvig (2021, §10.3), Poole, Mackworth & Goebel (1998, pp. 281–298), Nilsson (1998, chpt. 18.2)\n
                        50. \n
                        51. ^ Causal calculus: Poole, Mackworth & Goebel (1998, pp. 335–337)\n
                        52. \n
                        53. ^ Representing knowledge about knowledge: Belief calculus, modal logics: Russell & Norvig (2021, §10.4), Poole, Mackworth & Goebel (1998, pp. 275–277)\n
                        54. \n
                        55. ^ a b Default reasoning, Frame problem, default logic, non-monotonic logics, circumscription, closed world assumption, abduction: Russell & Norvig (2021, §10.6), Poole, Mackworth & Goebel (1998, pp. 248–256, 323–335), Luger & Stubblefield (2004, pp. 335–363), Nilsson (1998, ~18.3.3)\n(Poole et al. places abduction under "default reasoning". Luger et al. places this under "uncertain reasoning").\n
                        56. \n
                        57. ^ a b Breadth of commonsense knowledge: Lenat & Guha (1989, Introduction), Crevier (1993, pp. 113–114), Moravec (1988, p. 13), Russell & Norvig (2021, pp. 241, 385, 982) (qualification problem)\n
                        58. \n
                        59. ^ Newquist (1994), p. 296.\n
                        60. \n
                        61. ^ Crevier (1993), pp. 204–208.\n
                        62. \n
                        63. ^ Russell & Norvig (2021), p. 528.\n
                        64. \n
                        65. ^ Automated planning: Russell & Norvig (2021, chpt. 11).\n
                        66. \n
                        67. ^ Automated decision making, Decision theory: Russell & Norvig (2021, chpt. 16–18).\n
                        68. \n
                        69. ^ Classical planning: Russell & Norvig (2021, Section 11.2).\n
                        70. \n
                        71. ^ Sensorless or "conformant" planning, contingent planning, replanning (a.k.a. online planning): Russell & Norvig (2021, Section 11.5).\n
                        72. \n
                        73. ^ Uncertain preferences: Russell & Norvig (2021, Section 16.7)\nInverse reinforcement learning: Russell & Norvig (2021, Section 22.6)\n
                        74. \n
                        75. ^ Information value theory: Russell & Norvig (2021, Section 16.6).\n
                        76. \n
                        77. ^ Markov decision process: Russell & Norvig (2021, chpt. 17).\n
                        78. \n
                        79. ^ Game theory and multi-agent decision theory: Russell & Norvig (2021, chpt. 18).\n
                        80. \n
                        81. ^ Learning: Russell & Norvig (2021, chpt. 19–22), Poole, Mackworth & Goebel (1998, pp. 397–438), Luger & Stubblefield (2004, pp. 385–542), Nilsson (1998, chpt. 3.3, 10.3, 17.5, 20)\n
                        82. \n
                        83. ^ Turing (1950).\n
                        84. \n
                        85. ^ Solomonoff (1956).\n
                        86. \n
                        87. ^ Unsupervised learning: Russell & Norvig (2021, pp. 653) (definition), Russell & Norvig (2021, pp. 738–740) (cluster analysis), Russell & Norvig (2021, pp. 846–860) (word embedding)\n
                        88. \n
                        89. ^ a b Supervised learning: Russell & Norvig (2021, §19.2) (Definition), Russell & Norvig (2021, Chpt. 19–20) (Techniques)\n
                        90. \n
                        91. ^ Reinforcement learning: Russell & Norvig (2021, chpt. 22), Luger & Stubblefield (2004, pp. 442–449)\n
                        92. \n
                        93. ^ Transfer learning: Russell & Norvig (2021, pp. 281), The Economist (2016)\n
                        94. \n
                        95. ^ "Artificial Intelligence (AI): What Is AI and How Does It Work? | Built In". builtin.com. Retrieved 30 October 2023.\n
                        96. \n
                        97. ^ Computational learning theory: Russell & Norvig (2021, pp. 672–674), Jordan & Mitchell (2015)\n
                        98. \n
                        99. ^ Natural language processing (NLP): Russell & Norvig (2021, chpt. 23–24), Poole, Mackworth & Goebel (1998, pp. 91–104), Luger & Stubblefield (2004, pp. 591–632)\n
                        100. \n
                        101. ^ Subproblems of NLP: Russell & Norvig (2021, pp. 849–850)\n
                        102. \n
                        103. ^ Russell & Norvig (2021), pp. 856–858.\n
                        104. \n
                        105. ^ Dickson (2022).\n
                        106. \n
                        107. ^ Modern statistical and deep learning approaches to NLP: Russell & Norvig (2021, chpt. 24), Cambria & White (2014)\n
                        108. \n
                        109. ^ Vincent (2019).\n
                        110. \n
                        111. ^ Russell & Norvig (2021), pp. 875–878.\n
                        112. \n
                        113. ^ Bushwick (2023).\n
                        114. \n
                        115. ^ Computer vision: Russell & Norvig (2021, chpt. 25), Nilsson (1998, chpt. 6)\n
                        116. \n
                        117. ^ Russell & Norvig (2021), pp. 849–850.\n
                        118. \n
                        119. ^ Russell & Norvig (2021), pp. 895–899.\n
                        120. \n
                        121. ^ Russell & Norvig (2021), pp. 899–901.\n
                        122. \n
                        123. ^ Challa et al. (2011).\n
                        124. \n
                        125. ^ Russell & Norvig (2021), pp. 931–938.\n
                        126. \n
                        127. ^ MIT AIL (2014).\n
                        128. \n
                        129. ^ Affective computing: Thro (1993), Edelson (1991), Tao & Tan (2005), Scassellati (2002)\n
                        130. \n
                        131. ^ Waddell (2018).\n
                        132. \n
                        133. ^ Poria et al. (2017).\n
                        134. \n
                        135. ^ Search algorithms: Russell & Norvig (2021, chpts. 3–5), Poole, Mackworth & Goebel (1998, pp. 113–163), Luger & Stubblefield (2004, pp. 79–164, 193–219), Nilsson (1998, chpts. 7–12)\n
                        136. \n
                        137. ^ State space search: Russell & Norvig (2021, chpt. 3)\n
                        138. \n
                        139. ^ Russell & Norvig (2021), sect. 11.2.\n
                        140. \n
                        141. ^ Uninformed searches (breadth first search, depth-first search and general state space search): Russell & Norvig (2021, sect. 3.4), Poole, Mackworth & Goebel (1998, pp. 113–132), Luger & Stubblefield (2004, pp. 79–121), Nilsson (1998, chpt. 8)\n
                        142. \n
                        143. ^ Heuristic or informed searches (e.g., greedy best first and A*): Russell & Norvig (2021, sect. 3.5), Poole, Mackworth & Goebel (1998, pp. 132–147), Poole & Mackworth (2017, sect. 3.6), Luger & Stubblefield (2004, pp. 133–150)\n
                        144. \n
                        145. ^ Adversarial search: Russell & Norvig (2021, chpt. 5)\n
                        146. \n
                        147. ^ Local or "optimization" search: Russell & Norvig (2021, chpt. 4)\n
                        148. \n
                        149. ^ Singh Chauhan, Nagesh (18 December 2020). "Optimization Algorithms in Neural Networks". KDnuggets. Retrieved 13 January 2024.\n
                        150. \n
                        151. ^ Evolutionary computation: Russell & Norvig (2021, sect. 4.1.2)\n
                        152. \n
                        153. ^ Merkle & Middendorf (2013).\n
                        154. \n
                        155. ^ Logic: Russell & Norvig (2021, chpts. 6–9), Luger & Stubblefield (2004, pp. 35–77), Nilsson (1998, chpt. 13–16)\n
                        156. \n
                        157. ^ Propositional logic: Russell & Norvig (2021, chpt. 6), Luger & Stubblefield (2004, pp. 45–50), Nilsson (1998, chpt. 13)\n
                        158. \n
                        159. ^ First-order logic and features such as equality: Russell & Norvig (2021, chpt. 7), Poole, Mackworth & Goebel (1998, pp. 268–275), Luger & Stubblefield (2004, pp. 50–62), Nilsson (1998, chpt. 15)\n
                        160. \n
                        161. ^ Logical inference: Russell & Norvig (2021, chpt. 10)\n
                        162. \n
                        163. ^ logical deduction as search: Russell & Norvig (2021, sects. 9.3, 9.4), Poole, Mackworth & Goebel (1998, pp. ~46–52), Luger & Stubblefield (2004, pp. 62–73), Nilsson (1998, chpt. 4.2, 7.2)\n
                        164. \n
                        165. ^ Resolution and unification: Russell & Norvig (2021, sections 7.5.2, 9.2, 9.5)\n
                        166. \n
                        167. ^ Warren, D.H.; Pereira, L.M.; Pereira, F. (1977). "Prolog-the language and its implementation compared with Lisp". ACM SIGPLAN Notices. 12 (8): 109–115. doi:10.1145/872734.806939.\n
                        168. \n
                        169. ^ Fuzzy logic: Russell & Norvig (2021, pp. 214, 255, 459), Scientific American (1999)\n
                        170. \n
                        171. ^ a b Stochastic methods for uncertain reasoning: Russell & Norvig (2021, chpt. 12–18, 20), Poole, Mackworth & Goebel (1998, pp. 345–395), Luger & Stubblefield (2004, pp. 165–191, 333–381), Nilsson (1998, chpt. 19)\n
                        172. \n
                        173. ^ decision theory and decision analysis: Russell & Norvig (2021, chpt. 16–18), Poole, Mackworth & Goebel (1998, pp. 381–394)\n
                        174. \n
                        175. ^ Information value theory: Russell & Norvig (2021, sect. 16.6)\n
                        176. \n
                        177. ^ Markov decision processes and dynamic decision networks: Russell & Norvig (2021, chpt. 17)\n
                        178. \n
                        179. ^ a b c Stochastic temporal models: Russell & Norvig (2021, chpt. 14)\nHidden Markov model: Russell & Norvig (2021, sect. 14.3)\nKalman filters: Russell & Norvig (2021, sect. 14.4)\nDynamic Bayesian networks: Russell & Norvig (2021, sect. 14.5)\n
                        180. \n
                        181. ^ Game theory and mechanism design: Russell & Norvig (2021, chpt. 18)\n
                        182. \n
                        183. ^ Bayesian networks: Russell & Norvig (2021, sects. 12.5–12.6, 13.4–13.5, 14.3–14.5, 16.5, 20.2–20.3), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~182–190, ≈363–379), Nilsson (1998, chpt. 19.3–19.4)\n
                        184. \n
                        185. ^ Domingos (2015), chpt. 6.\n
                        186. \n
                        187. ^ Bayesian inference algorithm: Russell & Norvig (2021, sect. 13.3–13.5), Poole, Mackworth & Goebel (1998, pp. 361–381), Luger & Stubblefield (2004, pp. ~363–379), Nilsson (1998, chpt. 19.4 & 7)\n
                        188. \n
                        189. ^ Domingos (2015), p. 210.\n
                        190. \n
                        191. ^ Bayesian learning and the expectation–maximization algorithm: Russell & Norvig (2021, chpt. 20), Poole, Mackworth & Goebel (1998, pp. 424–433), Nilsson (1998, chpt. 20), Domingos (2015, p. 210)\n
                        192. \n
                        193. ^ Bayesian decision theory and Bayesian decision networks: Russell & Norvig (2021, sect. 16.5)\n
                        194. \n
                        195. ^ Statistical learning methods and classifiers: Russell & Norvig (2021, chpt. 20),\n
                        196. \n
                        197. ^ Ciaramella, Alberto; Ciaramella, Marco (2024). Introduction to Artificial Intelligence: from data analysis to generative AI. Intellisemantic Editions. ISBN 978-8-8947-8760-3.\n
                        198. \n
                        199. ^ Decision trees: Russell & Norvig (2021, sect. 19.3), Domingos (2015, p. 88)\n
                        200. \n
                        201. ^ Non-parameteric learning models such as K-nearest neighbor and support vector machines: Russell & Norvig (2021, sect. 19.7), Domingos (2015, p. 187) (k-nearest neighbor)\n\n
                        202. \n
                        203. ^ Domingos (2015), p. 152.\n
                        204. \n
                        205. ^ Naive Bayes classifier: Russell & Norvig (2021, sect. 12.6), Domingos (2015, p. 152)\n
                        206. \n
                        207. ^ a b Neural networks: Russell & Norvig (2021, chpt. 21), Domingos (2015, Chapter 4)\n
                        208. \n
                        209. ^ Gradient calculation in computational graphs, backpropagation, automatic differentiation: Russell & Norvig (2021, sect. 21.2), Luger & Stubblefield (2004, pp. 467–474), Nilsson (1998, chpt. 3.3)\n
                        210. \n
                        211. ^ Universal approximation theorem: Russell & Norvig (2021, p. 752)\nThe theorem: Cybenko (1988), Hornik, Stinchcombe & White (1989)\n
                        212. \n
                        213. ^ Feedforward neural networks: Russell & Norvig (2021, sect. 21.1)\n
                        214. \n
                        215. ^ Recurrent neural networks: Russell & Norvig (2021, sect. 21.6)\n
                        216. \n
                        217. ^ Perceptrons: Russell & Norvig (2021, pp. 21, 22, 683, 22)\n
                        218. \n
                        219. ^ a b Deep learning: Russell & Norvig (2021, chpt. 21), Goodfellow, Bengio & Courville (2016), Hinton et al. (2016), Schmidhuber (2015)\n
                        220. \n
                        221. ^ Convolutional neural networks: Russell & Norvig (2021, sect. 21.3)\n
                        222. \n
                        223. ^ Deng & Yu (2014), pp. 199–200.\n
                        224. \n
                        225. ^ Ciresan, Meier & Schmidhuber (2012).\n
                        226. \n
                        227. ^ Russell & Norvig (2021), p. 751.\n
                        228. \n
                        229. ^ a b c Russell & Norvig (2021), p. 17.\n
                        230. \n
                        231. ^ a b c d e f g Russell & Norvig (2021), p. 785.\n
                        232. \n
                        233. ^ a b Schmidhuber (2022), sect. 5.\n
                        234. \n
                        235. ^ Schmidhuber (2022), sect. 6.\n
                        236. \n
                        237. ^ a b c Schmidhuber (2022), sect. 7.\n
                        238. \n
                        239. ^ Schmidhuber (2022), sect. 8.\n
                        240. \n
                        241. ^ Quoted in Christian (2020, p. 22)\n
                        242. \n
                        243. ^ Smith (2023).\n
                        244. \n
                        245. ^ "Explained: Generative AI". 9 November 2023.\n
                        246. \n
                        247. ^ "AI Writing and Content Creation Tools". MIT Sloan Teaching & Learning Technologies. Archived from the original on 25 December 2023. Retrieved 25 December 2023.\n
                        248. \n
                        249. ^ Marmouyet (2023).\n
                        250. \n
                        251. ^ Kobielus (2019).\n
                        252. \n
                        253. ^ Thomason, James (21 May 2024). "Mojo Rising: The resurgence of AI-first programming languages". VentureBeat. Archived from the original on 27 June 2024. Retrieved 26 May 2024.\n
                        254. \n
                        255. ^ Wodecki, Ben (5 May 2023). "7 AI Programming Languages You Need to Know". AI Business. Archived from the original on 25 July 2024. Retrieved 5 October 2024.\n
                        256. \n
                        257. ^ Plumb, Taryn (18 September 2024). "Why Jensen Huang and Marc Benioff see \'gigantic\' opportunity for agentic AI". VentureBeat. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                        258. \n
                        259. ^ Davenport, T; Kalakota, R (June 2019). "The potential for artificial intelligence in healthcare". Future Healthc J. 6 (2): 94–98. doi:10.7861/futurehosp.6-2-94. PMC 6616181. PMID 31363513.\n
                        260. \n
                        261. ^ Lyakhova, U.A.; Lyakhov, P.A. (2024). "Systematic review of approaches to detection and classification of skin cancer using artificial intelligence: Development and prospects". Computers in Biology and Medicine. 178: 108742. doi:10.1016/j.compbiomed.2024.108742. PMID 38875908.\n
                        262. \n
                        263. ^ Alqudaihi, Kawther S.; Aslam, Nida; Khan, Irfan Ullah; Almuhaideb, Abdullah M.; Alsunaidi, Shikah J.; Ibrahim, Nehad M. Abdel Rahman; Alhaidari, Fahd A.; Shaikh, Fatema S.; Alsenbel, Yasmine M.; Alalharith, Dima M.; Alharthi, Hajar M.; Alghamdi, Wejdan M.; Alshahrani, Mohammed S. (2021). "Cough Sound Detection and Diagnosis Using Artificial Intelligence Techniques: Challenges and Opportunities". IEEE Access. 9: 102327–102344. Bibcode:2021IEEEA...9j2327A. doi:10.1109/ACCESS.2021.3097559. ISSN 2169-3536. PMC 8545201. PMID 34786317.\n
                        264. \n
                        265. ^ a b Bax, Monique; Thorpe, Jordan; Romanov, Valentin (December 2023). "The future of personalized cardiovascular medicine demands 3D and 4D printing, stem cells, and artificial intelligence". Frontiers in Sensors. 4. doi:10.3389/fsens.2023.1294721. ISSN 2673-5067.\n
                        266. \n
                        267. ^ Jumper, J; Evans, R; Pritzel, A (2021). "Highly accurate protein structure prediction with AlphaFold". Nature. 596 (7873): 583–589. Bibcode:2021Natur.596..583J. doi:10.1038/s41586-021-03819-2. PMC 8371605. PMID 34265844.\n
                        268. \n
                        269. ^ "AI discovers new class of antibiotics to kill drug-resistant bacteria". 20 December 2023. Archived from the original on 16 September 2024. Retrieved 5 October 2024.\n
                        270. \n
                        271. ^ "AI speeds up drug design for Parkinson\'s ten-fold". Cambridge University. 17 April 2024. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        272. \n
                        273. ^ Horne, Robert I.; Andrzejewska, Ewa A.; Alam, Parvez; Brotzakis, Z. Faidon; Srivastava, Ankit; Aubert, Alice; Nowinska, Magdalena; Gregory, Rebecca C.; Staats, Roxine; Possenti, Andrea; Chia, Sean; Sormanni, Pietro; Ghetti, Bernardino; Caughey, Byron; Knowles, Tuomas P. J.; Vendruscolo, Michele (17 April 2024). "Discovery of potent inhibitors of α-synuclein aggregation using structure-based iterative learning". Nature Chemical Biology. 20 (5). Nature: 634–645. doi:10.1038/s41589-024-01580-x. PMC 11062903. PMID 38632492.\n
                        274. \n
                        275. ^ Grant, Eugene F.; Lardner, Rex (25 July 1952). "The Talk of the Town – It". The New Yorker. ISSN 0028-792X. Archived from the original on 16 February 2020. Retrieved 28 January 2024.\n
                        276. \n
                        277. ^ Anderson, Mark Robert (11 May 2017). "Twenty years on from Deep Blue vs Kasparov: how a chess match started the big data revolution". The Conversation. Archived from the original on 17 September 2024. Retrieved 28 January 2024.\n
                        278. \n
                        279. ^ Markoff, John (16 February 2011). "Computer Wins on \'Jeopardy!\': Trivial, It\'s Not". The New York Times. ISSN 0362-4331. Archived from the original on 22 October 2014. Retrieved 28 January 2024.\n
                        280. \n
                        281. ^ Byford, Sam (27 May 2017). "AlphaGo retires from competitive Go after defeating world number one 3–0". The Verge. Archived from the original on 7 June 2017. Retrieved 28 January 2024.\n
                        282. \n
                        283. ^ Brown, Noam; Sandholm, Tuomas (30 August 2019). "Superhuman AI for multiplayer poker". Science. 365 (6456): 885–890. Bibcode:2019Sci...365..885B. doi:10.1126/science.aay2400. ISSN 0036-8075. PMID 31296650.\n
                        284. \n
                        285. ^ "MuZero: Mastering Go, chess, shogi and Atari without rules". Google DeepMind. 23 December 2020. Retrieved 28 January 2024.\n
                        286. \n
                        287. ^ Sample, Ian (30 October 2019). "AI becomes grandmaster in \'fiendishly complex\' StarCraft II". The Guardian. ISSN 0261-3077. Archived from the original on 29 December 2020. Retrieved 28 January 2024.\n
                        288. \n
                        289. ^ Wurman, P. R.; Barrett, S.; Kawamoto, K. (2022). "Outracing champion Gran Turismo drivers with deep reinforcement learning" (PDF). Nature. 602 (7896): 223–228. Bibcode:2022Natur.602..223W. doi:10.1038/s41586-021-04357-7. PMID 35140384.\n
                        290. \n
                        291. ^ Wilkins, Alex (13 March 2024). "Google AI learns to play open-world video games by watching them". New Scientist. Archived from the original on 26 July 2024. Retrieved 21 July 2024.\n
                        292. \n
                        293. ^ Uesato, J. et al.: Improving mathematical reasoning with process supervision. Archived 15 September 2024 at the Wayback Machine openai.com, May 31, 2023. Retrieved 2024-08-07.\n
                        294. \n
                        295. ^ Srivastava, Saurabh (29 February 2024). "Functional Benchmarks for Robust Evaluation of Reasoning Performance, and the Reasoning Gap". arXiv:2402.19450 [cs.AI].\n
                        296. \n
                        297. ^ Roberts, Siobhan (25 July 2024). "AI achieves silver-medal standard solving International Mathematical Olympiad problems". The New York Times. Archived from the original on 26 September 2024. Retrieved 7 August 2024.\n
                        298. \n
                        299. ^ LLEMMA. eleuther.ai. Retrieved 2024-08-07.\n
                        300. \n
                        301. ^ AI Math. Archived 5 October 2024 at the Wayback Machine Caesars Labs, 2024. Retrieved 2024-08-07.\n
                        302. \n
                        303. ^ Alex McFarland: 7 Best AI for Math Tools. Archived 11 September 2024 at the Wayback Machine unite.ai. Retrieved 2024-08-07\n
                        304. \n
                        305. ^ Matthew Finio & Amanda Downie: IBM Think 2024 Primer, "What is Artificial Intelligence (AI) in Finance?" 8 Dec. 2023\n
                        306. \n
                        307. ^ M. Nicolas, J. Firzli: Pensions Age/European Pensions magazine, "Artificial Intelligence: Ask the Industry" May June 2024 https://videovoice.org/ai-in-finance-innovation-entrepreneurship-vs-over-regulation-with-the-eus-artificial-intelligence-act-wont-work-as-intended/ Archived 11 September 2024 at the Wayback Machine.\n
                        308. \n
                        309. ^ a b c Congressional Research Service (2019). Artificial Intelligence and National Security (PDF). Washington, DC: Congressional Research Service. Archived (PDF) from the original on 8 May 2020. Retrieved 5 October 2024.PD-notice\n
                        310. \n
                        311. ^ a b Slyusar, Vadym (2019). "Artificial intelligence as the basis of future control networks". ResearchGate. doi:10.13140/RG.2.2.30247.50087. Archived from the original on 28 April 2021. Retrieved 20 July 2019.\n
                        312. \n
                        313. ^ Knight, Will. "The US and 30 Other Nations Agree to Set Guardrails for Military AI". Wired. ISSN 1059-1028. Archived from the original on 20 September 2024. Retrieved 24 January 2024.\n
                        314. \n
                        315. ^ Newsom, Gavin; Weber, Shirley N. (6 September 2023). "Executive Order N-12-23" (PDF). Executive Department, State of California. Archived (PDF) from the original on 21 February 2024. Retrieved 7 September 2023.\n
                        316. \n
                        317. ^ Pinaya, Walter H. L.; Graham, Mark S.; Kerfoot, Eric; Tudosiu, Petru-Daniel; Dafflon, Jessica; Fernandez, Virginia; Sanchez, Pedro; Wolleb, Julia; da Costa, Pedro F.; Patel, Ashay (2023). "Generative AI for Medical Imaging: extending the MONAI Framework". arXiv:2307.15208 [eess.IV].\n
                        318. \n
                        319. ^ Griffith, Erin; Metz, Cade (27 January 2023). "Anthropic Said to Be Closing In on $300 Million in New A.I. Funding". The New York Times. Archived from the original on 9 December 2023. Retrieved 14 March 2023.\n
                        320. \n
                        321. ^ Lanxon, Nate; Bass, Dina; Davalos, Jackie (10 March 2023). "A Cheat Sheet to AI Buzzwords and Their Meanings". Bloomberg News. Archived from the original on 17 November 2023. Retrieved 14 March 2023.\n
                        322. \n
                        323. ^ Marcelline, Marco (27 May 2023). "ChatGPT: Most Americans Know About It, But Few Actually Use the AI Chatbot". PCMag. Archived from the original on 21 May 2024. Retrieved 28 January 2024.\n
                        324. \n
                        325. ^ Lu, Donna (31 March 2023). "Misinformation, mistakes and the Pope in a puffer: what rapidly evolving AI can – and can\'t – do". The Guardian. ISSN 0261-3077. Archived from the original on 10 June 2024. Retrieved 28 January 2024.\n
                        326. \n
                        327. ^ Hurst, Luke (23 May 2023). "How a fake image of a Pentagon explosion shared on Twitter caused a real dip on Wall Street". euronews. Retrieved 28 January 2024.\n
                        328. \n
                        329. ^ Poole, David; Mackworth, Alan (2023). Artificial Intelligence, Foundations of Computational Agents (3rd ed.). Cambridge University Press. doi:10.1017/9781009258227. ISBN 978-1-0092-5819-7. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        330. \n
                        331. ^ Russell, Stuart; Norvig, Peter (2020). Artificial Intelligence: A Modern Approach (4th ed.). Pearson. ISBN 978-0-1346-1099-3.\n
                        332. \n
                        333. ^ "Why agents are the next frontier of generative AI". McKinsey Digital. 24 July 2024. Archived from the original on 3 October 2024. Retrieved 10 August 2024.\n
                        334. \n
                        335. ^ Ransbotham, Sam; Kiron, David; Gerbert, Philipp; Reeves, Martin (6 September 2017). "Reshaping Business With Artificial Intelligence". MIT Sloan Management Review. Archived from the original on 13 February 2024.\n
                        336. \n
                        337. ^ Sun, Yuran; Zhao, Xilei; Lovreglio, Ruggiero; Kuligowski, Erica (1 January 2024), Naser, M. Z. (ed.), "8 – AI for large-scale evacuation modeling: promises and challenges", Interpretable Machine Learning for the Analysis, Design, Assessment, and Informed Decision Making for Civil Infrastructure, Woodhead Publishing Series in Civil and Structural Engineering, Woodhead Publishing, pp. 185–204, ISBN 978-0-1282-4073-1, archived from the original on 19 May 2024, retrieved 28 June 2024.\n
                        338. \n
                        339. ^ Gomaa, Islam; Adelzadeh, Masoud; Gwynne, Steven; Spencer, Bruce; Ko, Yoon; Bénichou, Noureddine; Ma, Chunyun; Elsagan, Nour; Duong, Dana; Zalok, Ehab; Kinateder, Max (1 November 2021). "A Framework for Intelligent Fire Detection and Evacuation System". Fire Technology. 57 (6): 3179–3185. doi:10.1007/s10694-021-01157-3. ISSN 1572-8099. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        340. \n
                        341. ^ Zhao, Xilei; Lovreglio, Ruggiero; Nilsson, Daniel (1 May 2020). "Modelling and interpreting pre-evacuation decision-making using machine learning". Automation in Construction. 113: 103140. doi:10.1016/j.autcon.2020.103140. ISSN 0926-5805. Archived from the original on 19 May 2024. Retrieved 5 October 2024.\n
                        342. \n
                        343. ^ Müller, Vincent C. (30 April 2020). "Ethics of Artificial Intelligence and Robotics". Stanford Encyclopedia of Philosophy Archive. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        344. \n
                        345. ^ Simonite (2016).\n
                        346. \n
                        347. ^ Russell & Norvig (2021), p. 987.\n
                        348. \n
                        349. ^ Laskowski (2023).\n
                        350. \n
                        351. ^ GAO (2022).\n
                        352. \n
                        353. ^ Valinsky (2019).\n
                        354. \n
                        355. ^ Russell & Norvig (2021), p. 991.\n
                        356. \n
                        357. ^ Russell & Norvig (2021), pp. 991–992.\n
                        358. \n
                        359. ^ Christian (2020), p. 63.\n
                        360. \n
                        361. ^ Vincent (2022).\n
                        362. \n
                        363. ^ Kopel, Matthew. "Copyright Services: Fair Use". Cornell University Library. Archived from the original on 26 September 2024. Retrieved 26 April 2024.\n
                        364. \n
                        365. ^ Burgess, Matt. "How to Stop Your Data From Being Used to Train AI". Wired. ISSN 1059-1028. Archived from the original on 3 October 2024. Retrieved 26 April 2024.\n
                        366. \n
                        367. ^ Reisner (2023).\n
                        368. \n
                        369. ^ Alter & Harris (2023).\n
                        370. \n
                        371. ^ "Getting the Innovation Ecosystem Ready for AI. An IP policy toolkit" (PDF). WIPO.\n
                        372. \n
                        373. ^ Hammond, George (27 December 2023). "Big Tech is spending more than VC firms on AI startups". Ars Technica. Archived from the original on 10 January 2024.\n
                        374. \n
                        375. ^ Wong, Matteo (24 October 2023). "The Future of AI Is GOMA". The Atlantic. Archived from the original on 5 January 2024.\n
                        376. \n
                        377. ^ "Big tech and the pursuit of AI dominance". The Economist. 26 March 2023. Archived from the original on 29 December 2023.\n
                        378. \n
                        379. ^ Fung, Brian (19 December 2023). "Where the battle to dominate AI may be won". CNN Business. Archived from the original on 13 January 2024.\n
                        380. \n
                        381. ^ Metz, Cade (5 July 2023). "In the Age of A.I., Tech\'s Little Guys Need Big Friends". The New York Times. Archived from the original on 8 July 2024. Retrieved 5 October 2024.\n
                        382. \n
                        383. ^ "Electricity 2024 – Analysis". IEA. 24 January 2024. Retrieved 13 July 2024.\n
                        384. \n
                        385. ^ Calvert, Brian (28 March 2024). "AI already uses as much energy as a small country. It\'s only the beginning". Vox. New York, New York. Archived from the original on 3 July 2024. Retrieved 5 October 2024.\n
                        386. \n
                        387. ^ Halper, Evan; O\'Donovan, Caroline (21 June 2024). "AI is exhausting the power grid. Tech firms are seeking a miracle solution". Washington Post.\n
                        388. \n
                        389. ^ Davenport, Carly. "AI Data Centers and the Coming US Power Demand Surge" (PDF). Goldman Sachs. Archived from the original (PDF) on 26 July 2024. Retrieved 5 October 2024.\n
                        390. \n
                        391. ^ Ryan, Carol (12 April 2024). "Energy-Guzzling AI Is Also the Future of Energy Savings". Wall Street Journal. Dow Jones.\n
                        392. \n
                        393. ^ Hiller, Jennifer (1 July 2024). "Tech Industry Wants to Lock Up Nuclear Power for AI". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        394. \n
                        395. ^ Halper, Evan (20 September 2024). "Microsoft deal would reopen Three Mile Island nuclear plant to power AI". Washington Post.\n
                        396. \n
                        397. ^ Hiller, Jennifer (20 September 2024). "Three Mile Island\'s Nuclear Plant to Reopen, Help Power Microsoft\'s AI Centers". Wall Street Journal. Dow Jones. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        398. \n
                        399. ^ Nicas (2018).\n
                        400. \n
                        401. ^ Rainie, Lee; Keeter, Scott; Perrin, Andrew (22 July 2019). "Trust and Distrust in America". Pew Research Center. Archived from the original on 22 February 2024.\n
                        402. \n
                        403. ^ Williams (2023).\n
                        404. \n
                        405. ^ Taylor & Hern (2023).\n
                        406. \n
                        407. ^ a b Samuel, Sigal (19 April 2022). "Why it\'s so damn hard to make AI fair and unbiased". Vox. Archived from the original on 5 October 2024. Retrieved 24 July 2024.\n
                        408. \n
                        409. ^ a b Rose (2023).\n
                        410. \n
                        411. ^ CNA (2019).\n
                        412. \n
                        413. ^ Goffrey (2008), p. 17.\n
                        414. \n
                        415. ^ Berdahl et al. (2023); Goffrey (2008, p. 17); Rose (2023); Russell & Norvig (2021, p. 995)\n
                        416. \n
                        417. ^ Christian (2020), p. 25.\n
                        418. \n
                        419. ^ a b Russell & Norvig (2021), p. 995.\n
                        420. \n
                        421. ^ Grant & Hill (2023).\n
                        422. \n
                        423. ^ Larson & Angwin (2016).\n
                        424. \n
                        425. ^ Christian (2020), pp. 67–70.\n
                        426. \n
                        427. ^ Christian (2020, pp. 67–70); Russell & Norvig (2021, pp. 993–994)\n
                        428. \n
                        429. ^ Russell & Norvig (2021, p. 995); Lipartito (2011, p. 36); Goodman & Flaxman (2017, p. 6); Christian (2020, pp. 39–40, 65)\n
                        430. \n
                        431. ^ Quoted in Christian (2020, p. 65).\n
                        432. \n
                        433. ^ Russell & Norvig (2021, p. 994); Christian (2020, pp. 40, 80–81)\n
                        434. \n
                        435. ^ Quoted in Christian (2020, p. 80)\n
                        436. \n
                        437. ^ Dockrill (2022).\n
                        438. \n
                        439. ^ Sample (2017).\n
                        440. \n
                        441. ^ "Black Box AI". 16 June 2023. Archived from the original on 15 June 2024. Retrieved 5 October 2024.\n
                        442. \n
                        443. ^ Christian (2020), p. 110.\n
                        444. \n
                        445. ^ Christian (2020), pp. 88–91.\n
                        446. \n
                        447. ^ Christian (2020, p. 83); Russell & Norvig (2021, p. 997)\n
                        448. \n
                        449. ^ Christian (2020), p. 91.\n
                        450. \n
                        451. ^ Christian (2020), p. 83.\n
                        452. \n
                        453. ^ Verma (2021).\n
                        454. \n
                        455. ^ Rothman (2020).\n
                        456. \n
                        457. ^ Christian (2020), pp. 105–108.\n
                        458. \n
                        459. ^ Christian (2020), pp. 108–112.\n
                        460. \n
                        461. ^ Ropek, Lucas (21 May 2024). "New Anthropic Research Sheds Light on AI\'s \'Black Box\'". Gizmodo. Archived from the original on 5 October 2024. Retrieved 23 May 2024.\n
                        462. \n
                        463. ^ Russell & Norvig (2021), p. 989.\n
                        464. \n
                        465. ^ a b Russell & Norvig (2021), pp. 987–990.\n
                        466. \n
                        467. ^ Russell & Norvig (2021), p. 988.\n
                        468. \n
                        469. ^ Robitzski (2018); Sainato (2015)\n
                        470. \n
                        471. ^ Harari (2018).\n
                        472. \n
                        473. ^ Buckley, Chris; Mozur, Paul (22 May 2019). "How China Uses High-Tech Surveillance to Subdue Minorities". The New York Times. Archived from the original on 25 November 2019. Retrieved 2 July 2019.\n
                        474. \n
                        475. ^ "Security lapse exposed a Chinese smart city surveillance system". 3 May 2019. Archived from the original on 7 March 2021. Retrieved 14 September 2020.\n
                        476. \n
                        477. ^ Urbina et al. (2022).\n
                        478. \n
                        479. ^ a b E. McGaughey, \'Will Robots Automate Your Job Away? Full Employment, Basic Income, and Economic Democracy\' (2022), 51(3) Industrial Law Journal 511–559. Archived 27 May 2023 at the Wayback Machine.\n
                        480. \n
                        481. ^ Ford & Colvin (2015);McGaughey (2022)\n
                        482. \n
                        483. ^ IGM Chicago (2017).\n
                        484. \n
                        485. ^ Arntz, Gregory & Zierahn (2016), p. 33.\n
                        486. \n
                        487. ^ Lohr (2017); Frey & Osborne (2017); Arntz, Gregory & Zierahn (2016, p. 33)\n
                        488. \n
                        489. ^ Zhou, Viola (11 April 2023). "AI is already taking video game illustrators\' jobs in China". Rest of World. Archived from the original on 21 February 2024. Retrieved 17 August 2023.\n
                        490. \n
                        491. ^ Carter, Justin (11 April 2023). "China\'s game art industry reportedly decimated by growing AI use". Game Developer. Archived from the original on 17 August 2023. Retrieved 17 August 2023.\n
                        492. \n
                        493. ^ Morgenstern (2015).\n
                        494. \n
                        495. ^ Mahdawi (2017); Thompson (2014)\n
                        496. \n
                        497. ^ Tarnoff, Ben (4 August 2023). "Lessons from Eliza". The Guardian Weekly. pp. 34–39.\n
                        498. \n
                        499. ^ Cellan-Jones (2014).\n
                        500. \n
                        501. ^ Russell & Norvig (2021), p. 1001.\n
                        502. \n
                        503. ^ Bostrom (2014).\n
                        504. \n
                        505. ^ Russell (2019).\n
                        506. \n
                        507. ^ Bostrom (2014); Müller & Bostrom (2014); Bostrom (2015).\n
                        508. \n
                        509. ^ Harari (2023).\n
                        510. \n
                        511. ^ Müller & Bostrom (2014).\n
                        512. \n
                        513. ^ Leaders\' concerns about the existential risks of AI around 2015: Rawlinson (2015), Holley (2015), Gibbs (2014), Sainato (2015)\n
                        514. \n
                        515. ^ ""Godfather of artificial intelligence" talks impact and potential of new AI". CBS News. 25 March 2023. Archived from the original on 28 March 2023. Retrieved 28 March 2023.\n
                        516. \n
                        517. ^ Pittis, Don (4 May 2023). "Canadian artificial intelligence leader Geoffrey Hinton piles on fears of computer takeover". CBC. Archived from the original on 7 July 2024. Retrieved 5 October 2024.\n
                        518. \n
                        519. ^ "\'50–50 chance\' that AI outsmarts humanity, Geoffrey Hinton says". Bloomberg BNN. 14 June 2024. Retrieved 6 July 2024.\n
                        520. \n
                        521. ^ Valance (2023).\n
                        522. \n
                        523. ^ Taylor, Josh (7 May 2023). "Rise of artificial intelligence is inevitable but should not be feared, \'father of AI\' says". The Guardian. Archived from the original on 23 October 2023. Retrieved 26 May 2023.\n
                        524. \n
                        525. ^ Colton, Emma (7 May 2023). "\'Father of AI\' says tech fears misplaced: \'You cannot stop it\'". Fox News. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                        526. \n
                        527. ^ Jones, Hessie (23 May 2023). "Juergen Schmidhuber, Renowned \'Father Of Modern AI,\' Says His Life\'s Work Won\'t Lead To Dystopia". Forbes. Archived from the original on 26 May 2023. Retrieved 26 May 2023.\n
                        528. \n
                        529. ^ McMorrow, Ryan (19 December 2023). "Andrew Ng: \'Do we think the world is better off with more or less intelligence?\'". Financial Times. Archived from the original on 25 January 2024. Retrieved 30 December 2023.\n
                        530. \n
                        531. ^ Levy, Steven (22 December 2023). "How Not to Be Stupid About AI, With Yann LeCun". Wired. Archived from the original on 28 December 2023. Retrieved 30 December 2023.\n
                        532. \n
                        533. ^ Arguments that AI is not an imminent risk: Brooks (2014), Geist (2015), Madrigal (2015), Lee (2014)\n
                        534. \n
                        535. ^ a b Christian (2020), pp. 67, 73.\n
                        536. \n
                        537. ^ Yudkowsky (2008).\n
                        538. \n
                        539. ^ a b Anderson & Anderson (2011).\n
                        540. \n
                        541. ^ AAAI (2014).\n
                        542. \n
                        543. ^ Wallach (2010).\n
                        544. \n
                        545. ^ Russell (2019), p. 173.\n
                        546. \n
                        547. ^ Stewart, Ashley; Melton, Monica. "Hugging Face CEO says he\'s focused on building a \'sustainable model\' for the $4.5 billion open-source-AI startup". Business Insider. Archived from the original on 25 September 2024. Retrieved 14 April 2024.\n
                        548. \n
                        549. ^ Wiggers, Kyle (9 April 2024). "Google open sources tools to support AI model development". TechCrunch. Archived from the original on 10 September 2024. Retrieved 14 April 2024.\n
                        550. \n
                        551. ^ Heaven, Will Douglas (12 May 2023). "The open-source AI boom is built on Big Tech\'s handouts. How long will it last?". MIT Technology Review. Retrieved 14 April 2024.\n
                        552. \n
                        553. ^ Brodsky, Sascha (19 December 2023). "Mistral AI\'s New Language Model Aims for Open Source Supremacy". AI Business. Archived from the original on 5 September 2024. Retrieved 5 October 2024.\n
                        554. \n
                        555. ^ Edwards, Benj (22 February 2024). "Stability announces Stable Diffusion 3, a next-gen AI image generator". Ars Technica. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                        556. \n
                        557. ^ Marshall, Matt (29 January 2024). "How enterprises are using open source LLMs: 16 examples". VentureBeat. Archived from the original on 26 September 2024. Retrieved 5 October 2024.\n
                        558. \n
                        559. ^ Piper, Kelsey (2 February 2024). "Should we make our most powerful AI models open source to all?". Vox. Archived from the original on 5 October 2024. Retrieved 14 April 2024.\n
                        560. \n
                        561. ^ Alan Turing Institute (2019). "Understanding artificial intelligence ethics and safety" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                        562. \n
                        563. ^ Alan Turing Institute (2023). "AI Ethics and Governance in Practice" (PDF). Archived (PDF) from the original on 11 September 2024. Retrieved 5 October 2024.\n
                        564. \n
                        565. ^ Floridi, Luciano; Cowls, Josh (23 June 2019). "A Unified Framework of Five Principles for AI in Society". Harvard Data Science Review. 1 (1). doi:10.1162/99608f92.8cd550d1. S2CID 198775713.\n
                        566. \n
                        567. ^ Buruk, Banu; Ekmekci, Perihan Elif; Arda, Berna (1 September 2020). "A critical perspective on guidelines for responsible and trustworthy artificial intelligence". Medicine, Health Care and Philosophy. 23 (3): 387–399. doi:10.1007/s11019-020-09948-1. ISSN 1572-8633. PMID 32236794. S2CID 214766800. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        568. \n
                        569. ^ Kamila, Manoj Kumar; Jasrotia, Sahil Singh (1 January 2023). "Ethical issues in the development of artificial intelligence: recognizing the risks". International Journal of Ethics and Systems. ahead-of-print (ahead-of-print). doi:10.1108/IJOES-05-2023-0107. ISSN 2514-9369. S2CID 259614124. Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        570. \n
                        571. ^ "AI Safety Institute releases new AI safety evaluations platform". UK Government. 10 May 2024. Archived from the original on 5 October 2024. Retrieved 14 May 2024.\n
                        572. \n
                        573. ^ Regulation of AI to mitigate risks: Berryhill et al. (2019), Barfield & Pagallo (2018), Iphofen & Kritikos (2019), Wirtz, Weyerer & Geyer (2018), Buiten (2019)\n
                        574. \n
                        575. ^ a b Vincent (2023).\n
                        576. \n
                        577. ^ Stanford University (2023).\n
                        578. \n
                        579. ^ a b c d UNESCO (2021).\n
                        580. \n
                        581. ^ Kissinger (2021).\n
                        582. \n
                        583. ^ Altman, Brockman & Sutskever (2023).\n
                        584. \n
                        585. ^ VOA News (25 October 2023). "UN Announces Advisory Body on Artificial Intelligence". Archived from the original on 18 September 2024. Retrieved 5 October 2024.\n
                        586. \n
                        587. ^ "Council of Europe opens first ever global treaty on AI for signature". Council of Europe. 5 September 2024. Archived from the original on 17 September 2024. Retrieved 17 September 2024.\n
                        588. \n
                        589. ^ Edwards (2023).\n
                        590. \n
                        591. ^ Kasperowicz (2023).\n
                        592. \n
                        593. ^ Fox News (2023).\n
                        594. \n
                        595. ^ Milmo, Dan (3 November 2023). "Hope or Horror? The great AI debate dividing its pioneers". The Guardian Weekly. pp. 10–12.\n
                        596. \n
                        597. ^ "The Bletchley Declaration by Countries Attending the AI Safety Summit, 1–2 November 2023". GOV.UK. 1 November 2023. Archived from the original on 1 November 2023. Retrieved 2 November 2023.\n
                        598. \n
                        599. ^ "Countries agree to safe and responsible development of frontier AI in landmark Bletchley Declaration". GOV.UK (Press release). Archived from the original on 1 November 2023. Retrieved 1 November 2023.\n
                        600. \n
                        601. ^ "Second global AI summit secures safety commitments from companies". Reuters. 21 May 2024. Retrieved 23 May 2024.\n
                        602. \n
                        603. ^ "Frontier AI Safety Commitments, AI Seoul Summit 2024". gov.uk. 21 May 2024. Archived from the original on 23 May 2024. Retrieved 23 May 2024.\n
                        604. \n
                        605. ^ a b Russell & Norvig (2021), p. 9.\n
                        606. \n
                        607. ^ a b c Copeland, J., ed. (2004). The Essential Turing: the ideas that gave birth to the computer age. Oxford, England: Clarendon Press. ISBN 0-1982-5079-7.\n
                        608. \n
                        609. ^ "Google books ngram". Archived from the original on 5 October 2024. Retrieved 5 October 2024.\n
                        610. \n
                        611. ^ AI\'s immediate precursors: McCorduck (2004, pp. 51–107), Crevier (1993, pp. 27–32), Russell & Norvig (2021, pp. 8–17), Moravec (1988, p. 3)\n
                        612. \n
                        613. ^ a b Turing\'s original publication of the Turing test in "Computing machinery and intelligence": Turing (1950)\nHistorical influence and philosophical implications: Haugeland (1985, pp. 6–9), Crevier (1993, p. 24), McCorduck (2004, pp. 70–71), Russell & Norvig (2021, pp. 2, 984)\n
                        614. \n
                        615. ^ Crevier (1993), pp. 47–49.\n
                        616. \n
                        617. ^ Russell & Norvig (2003), p. 17.\n
                        618. \n
                        619. ^ Russell & Norvig (2003), p. 18.\n
                        620. \n
                        621. ^ Newquist (1994), p. 86.\n
                        622. \n
                        623. ^ Simon (1965, p. 96) quoted in Crevier (1993, p. 109)\n
                        624. \n
                        625. ^ Minsky (1967, p. 2) quoted in Crevier (1993, p. 109)\n
                        626. \n
                        627. ^ Russell & Norvig (2021), p. 21.\n
                        628. \n
                        629. ^ Lighthill (1973).\n
                        630. \n
                        631. ^ NRC 1999, pp. 212–213.\n
                        632. \n
                        633. ^ Russell & Norvig (2021), p. 22.\n
                        634. \n
                        635. ^ Expert systems: Russell & Norvig (2021, pp. 23, 292), Luger & Stubblefield (2004, pp. 227–331), Nilsson (1998, chpt. 17.4), McCorduck (2004, pp. 327–335, 434–435), Crevier (1993, pp. 145–162, 197–203), Newquist (1994, pp. 155–183)\n
                        636. \n
                        637. ^ Russell & Norvig (2021), p. 24.\n
                        638. \n
                        639. ^ Nilsson (1998), p. 7.\n
                        640. \n
                        641. ^ McCorduck (2004), pp. 454–462.\n
                        642. \n
                        643. ^ Moravec (1988).\n
                        644. \n
                        645. ^ a b Brooks (1990).\n
                        646. \n
                        647. ^ Developmental robotics: Weng et al. (2001), Lungarella et al. (2003), Asada et al. (2009), Oudeyer (2010)\n
                        648. \n
                        649. ^ Russell & Norvig (2021), p. 25.\n
                        650. \n
                        651. ^ Crevier (1993, pp. 214–215), Russell & Norvig (2021, pp. 24, 26)\n
                        652. \n
                        653. ^ Russell & Norvig (2021), p. 26.\n
                        654. \n
                        655. ^ Formal and narrow methods adopted in the 1990s: Russell & Norvig (2021, pp. 24–26), McCorduck (2004, pp. 486–487)\n
                        656. \n
                        657. ^ AI widely used in the late 1990s: Kurzweil (2005, p. 265), NRC (1999, pp. 216–222), Newquist (1994, pp. 189–201)\n
                        658. \n
                        659. ^ Wong (2023).\n
                        660. \n
                        661. ^ Moore\'s Law and AI: Russell & Norvig (2021, pp. 14, 27)\n
                        662. \n
                        663. ^ a b c Clark (2015b).\n
                        664. \n
                        665. ^ Big data: Russell & Norvig (2021, p. 26)\n
                        666. \n
                        667. ^ Sagar, Ram (3 June 2020). "OpenAI Releases GPT-3, The Largest Model So Far". Analytics India Magazine. Archived from the original on 4 August 2020. Retrieved 15 March 2023.\n
                        668. \n
                        669. ^ DiFeliciantonio (2023).\n
                        670. \n
                        671. ^ Goswami (2023).\n
                        672. \n
                        673. ^ Grayling, Anthony; Ball, Brian (1 August 2024). "Philosophy is crucial in the age of AI". The Conversation. Archived from the original on 5 October 2024. Retrieved 4 October 2024.\n
                        674. \n
                        675. ^ a b Jarow, Oshan (15 June 2024). "Will AI ever become conscious? It depends on how you think about biology". Vox. Archived from the original on 21 September 2024. Retrieved 4 October 2024.\n
                        676. \n
                        677. ^ McCarthy, John. "The Philosophy of AI and the AI of Philosophy". jmc.stanford.edu. Archived from the original on 23 October 2018. Retrieved 3 October 2024.\n
                        678. \n
                        679. ^ a b Turing (1950), p. 1.\n
                        680. \n
                        681. ^ Turing (1950), Under "The Argument from Consciousness".\n
                        682. \n
                        683. ^ Kirk-Giannini, Cameron Domenico; Goldstein, Simon (16 October 2023). "AI is closer than ever to passing the Turing test for \'intelligence\'. What happens when it does?". The Conversation. Archived from the original on 25 September 2024. Retrieved 17 August 2024.\n
                        684. \n
                        685. ^ Russell & Norvig (2021), p. 3.\n
                        686. \n
                        687. ^ Maker (2006).\n
                        688. \n
                        689. ^ McCarthy (1999).\n
                        690. \n
                        691. ^ Minsky (1986).\n
                        692. \n
                        693. ^ "What Is Artificial Intelligence (AI)?". Google Cloud Platform. Archived from the original on 31 July 2023. Retrieved 16 October 2023.\n
                        694. \n
                        695. ^ "One of the Biggest Problems in Regulating AI Is Agreeing on a Definition". carnegieendowment.org. Retrieved 31 July 2024.\n
                        696. \n
                        697. ^ "AI or BS? How to tell if a marketing tool really uses artificial intelligence". The Drum. Retrieved 31 July 2024.\n
                        698. \n
                        699. ^ Nilsson (1983), p. 10.\n
                        700. \n
                        701. ^ Haugeland (1985), pp. 112–117.\n
                        702. \n
                        703. ^ Physical symbol system hypothesis: Newell & Simon (1976, p. 116)\nHistorical significance: McCorduck (2004, p. 153), Russell & Norvig (2021, p. 19)\n
                        704. \n
                        705. ^ Moravec\'s paradox: Moravec (1988, pp. 15–16), Minsky (1986, p. 29), Pinker (2007, pp. 190–191)\n
                        706. \n
                        707. ^ Dreyfus\' critique of AI: Dreyfus (1972), Dreyfus & Dreyfus (1986)\nHistorical significance and philosophical implications: Crevier (1993, pp. 120–132), McCorduck (2004, pp. 211–239), Russell & Norvig (2021, pp. 981–982), Fearn (2007, chpt. 3)\n
                        708. \n
                        709. ^ Crevier (1993), p. 125.\n
                        710. \n
                        711. ^ Langley (2011).\n
                        712. \n
                        713. ^ Katz (2012).\n
                        714. \n
                        715. ^ Neats vs. scruffies, the historic debate: McCorduck (2004, pp. 421–424, 486–489), Crevier (1993, p. 168), Nilsson (1983, pp. 10–11), Russell & Norvig (2021, p. 24)\nA classic example of the "scruffy" approach to intelligence: Minsky (1986)\nA modern example of neat AI and its aspirations in the 21st century: Domingos (2015)\n
                        716. \n
                        717. ^ Pennachin & Goertzel (2007).\n
                        718. \n
                        719. ^ a b Roberts (2016).\n
                        720. \n
                        721. ^ Russell & Norvig (2021), p. 986.\n
                        722. \n
                        723. ^ Chalmers (1995).\n
                        724. \n
                        725. ^ Dennett (1991).\n
                        726. \n
                        727. ^ Horst (2005).\n
                        728. \n
                        729. ^ Searle (1999).\n
                        730. \n
                        731. ^ Searle (1980), p. 1.\n
                        732. \n
                        733. ^ Russell & Norvig (2021), p. 9817.\n
                        734. \n
                        735. ^ Searle\'s Chinese room argument: Searle (1980). Searle\'s original presentation of the thought experiment., Searle (1999).\nDiscussion: Russell & Norvig (2021, pp. 985), McCorduck (2004, pp. 443–445), Crevier (1993, pp. 269–271)\n
                        736. \n
                        737. ^ Leith, Sam (7 July 2022). "Nick Bostrom: How can we be certain a machine isn\'t conscious?". The Spectator. Archived from the original on 26 September 2024. Retrieved 23 February 2024.\n
                        738. \n
                        739. ^ a b c Thomson, Jonny (31 October 2022). "Why don\'t robots have rights?". Big Think. Archived from the original on 13 September 2024. Retrieved 23 February 2024.\n
                        740. \n
                        741. ^ a b Kateman, Brian (24 July 2023). "AI Should Be Terrified of Humans". Time. Archived from the original on 25 September 2024. Retrieved 23 February 2024.\n
                        742. \n
                        743. ^ Wong, Jeff (10 July 2023). "What leaders need to know about robot rights". Fast Company.\n
                        744. \n
                        745. ^ Hern, Alex (12 January 2017). "Give robots \'personhood\' status, EU committee argues". The Guardian. ISSN 0261-3077. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                        746. \n
                        747. ^ Dovey, Dana (14 April 2018). "Experts Don\'t Think Robots Should Have Rights". Newsweek. Archived from the original on 5 October 2024. Retrieved 23 February 2024.\n
                        748. \n
                        749. ^ Cuddy, Alice (13 April 2018). "Robot rights violate human rights, experts warn EU". euronews. Archived from the original on 19 September 2024. Retrieved 23 February 2024.\n
                        750. \n
                        751. ^ The Intelligence explosion and technological singularity: Russell & Norvig (2021, pp. 1004–1005), Omohundro (2008), Kurzweil (2005)\n\nI. J. Good\'s "intelligence explosion": Good (1965)\n\nVernor Vinge\'s "singularity": Vinge (1993)\n
                        752. \n
                        753. ^ Russell & Norvig (2021), p. 1005.\n
                        754. \n
                        755. ^ Transhumanism: Moravec (1988), Kurzweil (2005), Russell & Norvig (2021, p. 1005)\n
                        756. \n
                        757. ^ AI as evolution: Edward Fredkin is quoted in McCorduck (2004, p. 401), Butler (1863), Dyson (1998)\n
                        758. \n
                        759. ^ AI in myth: McCorduck (2004, pp. 4–5)\n
                        760. \n
                        761. ^ McCorduck (2004), pp. 340–400.\n
                        762. \n
                        763. ^ Buttazzo (2001).\n
                        764. \n
                        765. ^ Anderson (2008).\n
                        766. \n
                        767. ^ McCauley (2007).\n
                        768. \n
                        769. ^ Galvan (1997).\n
                        770. \n
                        \n

                        AI textbooks

                        \n

                        The two most widely used textbooks in 2023 (see the Open Syllabus):\n

                        \n\n

                        These were the four of the most widely used AI textbooks in 2008:\n

                        \n
                        \n\n
                        \n

                        Other textbooks:\n

                        \n\n

                        History of AI

                        \n
                        \n\n
                        \n

                        Other sources

                        \n
                        \n\n\n
                        \n

                        Further reading

                        \n
                        \n\n
                        \n
                        \n\n\n\n\n\n\n\n\n\n
                        \n
                        \n\t\t\t\t\t\n\t\t\t\t
                        \n\t\t\t\n\t\t\t\n\t\t\n\t\t\n\t \n \n
                        \n\t
                          \n
                          \n\n\n'}'. -[2024-10-27 22:52:10,988] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_otnrqgDNgviSW8WMwpU9AYkW', type='function')], function_call=None) -[2024-10-27 22:52:10,988] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-10-27 22:52:10,988] [INFO] [BaseAgent]: Agent 'agent4' acting on task 'Search for the latest trends in AI using Twitter.'. -[2024-10-27 22:52:12,619] [INFO] [BaseAgent]: Agent 'agent4' called 'fetch_webpage' with args '{'url': 'https://twitter.com/search?q=latest%20trends%20in%20AI'}'. -[2024-10-27 22:52:12,619] [INFO] [BaseAgent]: Agent 'agent4' obtained result '{'success': True, 'error-msg': '', 'url': 'https://twitter.com/search?q=latest%20trends%20in%20AI', 'content': '\n \n \n x.com\n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n '}'. 
-[2024-10-27 22:52:12,620] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_DM2gAMnHfKPxzxkipEKwq1i2', type='function')], function_call=None) -[2024-10-27 22:52:12,621] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Could you please search for the latest trends in AI using Google?"}', name='communicate_to'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_otnrqgDNgviSW8WMwpU9AYkW', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_DM2gAMnHfKPxzxkipEKwq1i2', type='function')], function_call=None) - -[2024-10-27 22:52:12,621] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Could you please search for the latest trends in AI using Google?"}', name='communicate_to'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), 
id='call_otnrqgDNgviSW8WMwpU9AYkW', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_DM2gAMnHfKPxzxkipEKwq1i2', type='function')], function_call=None) - -[2024-10-27 22:52:12,622] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent1","message":"Could you please search for the latest trends in AI using Google?"}', name='communicate_to'), id='call_kuc4xYiVcX3NHPRwoLOF1qud', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_otnrqgDNgviSW8WMwpU9AYkW', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=latest%20trends%20in%20AI"}', name='fetch_webpage'), id='call_DM2gAMnHfKPxzxkipEKwq1i2', type='function')], function_call=None) - -[2024-10-27 22:52:13,058] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-10-27 22:52:13,059] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-10-27 22:52:13,059] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-27 22:52:13,059] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:52:13,060] [INFO] [Evaluator]: Total Token Consumption: 36 -[2024-10-27 22:52:13,060] [INFO] [Evaluator]: Average Tokens per Iteration: 36.0 -[2024-10-27 22:52:13,060] [INFO] [Engine]: Simulation completed. 
-[2024-10-27 22:52:27,098] [DEBUG] [Engine]: Environment 'Web' initialized. -[2024-10-27 22:52:27,098] [INFO] [BaseAgent]: Agent 'agent1' initialized. -[2024-10-27 22:52:27,098] [DEBUG] [Engine]: Agent 'agent1' of type 'BaseAgent' initialized. -[2024-10-27 22:52:27,098] [INFO] [BaseAgent]: Agent 'agent2' initialized. -[2024-10-27 22:52:27,099] [DEBUG] [Engine]: Agent 'agent2' of type 'BaseAgent' initialized. -[2024-10-27 22:52:27,099] [INFO] [BaseAgent]: Agent 'agent3' initialized. -[2024-10-27 22:52:27,099] [DEBUG] [Engine]: Agent 'agent3' of type 'BaseAgent' initialized. -[2024-10-27 22:52:27,099] [INFO] [BaseAgent]: Agent 'agent4' initialized. -[2024-10-27 22:52:27,099] [DEBUG] [Engine]: Agent 'agent4' of type 'BaseAgent' initialized. -[2024-10-27 22:52:27,099] [INFO] [AgentGraph]: AgentGraph initialized with execution mode 'parallel'. -[2024-10-27 22:52:27,099] [INFO] [AgentGraph]: Relationship added: agent2 --[reports_to]--> agent1 -[2024-10-27 22:52:27,100] [INFO] [AgentGraph]: Relationship added: agent3 --[reports_to]--> agent1 -[2024-10-27 22:52:27,100] [INFO] [AgentGraph]: Relationship added: agent4 --[reports_to]--> agent1 -[2024-10-27 22:52:27,100] [DEBUG] [Engine]: Memory of type 'SharedMemory' initialized. -[2024-10-27 22:52:27,100] [INFO] [EnginePlanner]: EnginePlanner initialized. -[2024-10-27 22:52:27,100] [INFO] [Engine]: Engine initialized. -[2024-10-27 22:52:27,100] [INFO] [Engine]: Engine starting simulation. -[2024-10-27 22:52:27,100] [INFO] [Engine]: Starting iteration 1 -[2024-10-27 22:52:27,100] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
- -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. -Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:52:28,033] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:52:28,033] [INFO] [Engine]: Assigned tasks: {'agent2': 'Search for the latest trends in AI using Google.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:52:28,033] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google. 
-[2024-10-27 22:52:28,033] [INFO] [BaseAgent]: Agent 'agent2' acting on task 'Search for the latest trends in AI using Google.'. -[2024-10-27 22:52:30,259] [INFO] [BaseAgent]: Agent 'agent2' called 'fetch_webpage' with args '{'url': 'https://www.google.com/search?q=latest+trends+in+AI'}'. -[2024-10-27 22:52:30,268] [INFO] [BaseAgent]: Agent 'agent2' obtained result '{'success': True, 'error-msg': '', 'url': 'https://www.google.com/search?q=latest+trends+in+AI', 'content': 'latest trends in AI - Google Search

                          Accessibility Links

                          About 518,000,000 results (0.32 seconds) 

                          Ads

                          Sponsored
                          View the report for AI use cases, AI predictions, tips for overcoming the risks, and more. AI is making software a collaborator in the workplace.
                          Sponsored
                          The AI market is quickly increasing due to the rapid pace of technological change. See how we are helping executives understand how...
                          Sponsored
                          AI Trends 2024
                          NVIDIA
                          ·https://www.nvidia.com
                          NVIDIA
                          ·https://www.nvidia.com
                          Trends in AI — Elevate AI decision-making and innovation in your organization. Discover AI trends and...
                          Sponsored
                          Companies not only applied AI in Q3—they also started measuring value. See what they did. Read "The State of Generative AI in the Enterprise: Moving from potential to performance.".

                          Ads

                          Sponsored
                          Explore the Future: AI Stats & Trends in 2024. Stay Ahead of the Evolution. Get Ahead with AI Statistics & Insights. Explore the Top AI Trends. AI Impact in 2024. AI Insights in 2024.
                          Sponsored
                          Artificial intelligence and related technologies are advancing at a rapid pace. Explore our work on AI use, policy, ethics, and development. View Events. Browse Publications.
                          \u200ePublications · \u200eEvents · \u200eAbout Us
                          Google apps
                          '}'. -[2024-10-27 22:52:30,387] [DEBUG] [Engine]: Agent 'agent2' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_IALKCoF9xx6uk4YBCXk8Co6M', type='function')], function_call=None) -[2024-10-27 22:52:30,388] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:52:30,388] [INFO] [BaseAgent]: Agent 'agent3' acting on task 'Search for the latest trends in AI using Wikipedia.'. -[2024-10-27 22:52:31,942] [INFO] [BaseAgent]: Agent 'agent3' called 'fetch_webpage' with args '{'url': 'https://en.wikipedia.org/wiki/Artificial_intelligence'}'. -[2024-10-27 22:52:32,128] [DEBUG] [Engine]: Agent 'agent3' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_HigGUIz5miADL4O3bNXPFE5V', type='function')], function_call=None) -[2024-10-27 22:52:32,128] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. 
-[2024-10-27 22:52:34,241] [DEBUG] [Engine]: Agent 'agent4' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_QssF2paXKheyxbuFhUBZKHjJ', type='function')], function_call=None) -[2024-10-27 22:52:34,242] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_IALKCoF9xx6uk4YBCXk8Co6M', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_HigGUIz5miADL4O3bNXPFE5V', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_QssF2paXKheyxbuFhUBZKHjJ', type='function')], function_call=None) - -[2024-10-27 22:52:34,243] [INFO] [Engine]: Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_IALKCoF9xx6uk4YBCXk8Co6M', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_HigGUIz5miADL4O3bNXPFE5V', type='function')], function_call=None) -- agent4: 
Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_QssF2paXKheyxbuFhUBZKHjJ', type='function')], function_call=None) - -[2024-10-27 22:52:34,244] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. -Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_IALKCoF9xx6uk4YBCXk8Co6M', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_HigGUIz5miADL4O3bNXPFE5V', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_QssF2paXKheyxbuFhUBZKHjJ', type='function')], function_call=None) - -[2024-10-27 22:52:34,677] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': True} -[2024-10-27 22:52:34,678] [INFO] [Engine]: Starting iteration 2 -[2024-10-27 22:52:34,678] [DEBUG] [EnginePlanner]: Created prompt for task assignment: -You are an orchestrator assigning tasks to a group of agents based on their profiles and current progress and task description. - -Task Description: -Find new about the latest trends in AI. - -Current Progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_IALKCoF9xx6uk4YBCXk8Co6M', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_HigGUIz5miADL4O3bNXPFE5V', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_QssF2paXKheyxbuFhUBZKHjJ', type='function')], function_call=None) - - -Agent Profiles: -- Agent ID: agent1 - Relationships: {'agent2': 'R_reports_to', 'agent3': 'R_reports_to', 'agent4': 'R_reports_to'} - Profile: You are a helpful and supportive team leader. You are not allowed to use the internet. You must ask other agents for an answer in this task. -- Agent ID: agent2 - Relationships: {'agent1': 'reports_to'} - Profile: You are a curious and ambitious team member. You will use google to search results for the task. -- Agent ID: agent3 - Relationships: {'agent1': 'reports_to'} - Profile: You are a diligent and hardworking team member. You will use wikipedia to search results for the task. -- Agent ID: agent4 - Relationships: {'agent1': 'reports_to'} - Profile: You are a creative and innovative team member. You will use twitter to search results for the task. -Based on the current progress and agent profiles, assign the next task to each agent that needs to perform an action. 
-Provide the assignments in the following JSON format: - -{ - "tasks": { - "agent1": "Task description for agent1", - "agent2": "Task description for agent2" - // Add more agents as needed - }, - "continue": true // Set to false if the task is completed -} - -If an agent does not need to be assigned a task, you can omit it from the 'tasks' section. - -[2024-10-27 22:52:35,938] [DEBUG] [EnginePlanner]: Received task assignment: {'tasks': {'agent1': 'Please gather the information from agent2, agent3, and agent4 regarding the latest trends in AI and compile a summary report.', 'agent2': 'Search for the latest trends in AI using Google search.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'}, 'continue': True} -[2024-10-27 22:52:35,938] [INFO] [Engine]: Assigned tasks: {'agent1': 'Please gather the information from agent2, agent3, and agent4 regarding the latest trends in AI and compile a summary report.', 'agent2': 'Search for the latest trends in AI using Google search.', 'agent3': 'Search for the latest trends in AI using Wikipedia.', 'agent4': 'Search for the latest trends in AI using Twitter.'} -[2024-10-27 22:52:35,938] [INFO] [Engine]: Assigning task to agent1: Please gather the information from agent2, agent3, and agent4 regarding the latest trends in AI and compile a summary report. -[2024-10-27 22:52:40,992] [DEBUG] [Engine]: Agent 'agent1' completed task with result: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Could you please provide information on the latest trends in AI for a summary report?"}', name='communicate_to'), id='call_CVycaJpKw4PYBRi9QbfTHe6R', type='function')], function_call=None) -[2024-10-27 22:52:40,992] [INFO] [Engine]: Assigning task to agent2: Search for the latest trends in AI using Google search. 
-[2024-10-27 22:55:09,643] [ERROR] [Engine]: Error while executing task for agent 'agent2': -[2024-10-27 22:55:09,643] [INFO] [Engine]: Assigning task to agent3: Search for the latest trends in AI using Wikipedia. -[2024-10-27 22:55:11,305] [ERROR] [Engine]: Error while executing task for agent 'agent3': -[2024-10-27 22:55:11,306] [INFO] [Engine]: Assigning task to agent4: Search for the latest trends in AI using Twitter. -[2024-10-27 22:55:12,671] [ERROR] [Engine]: Error while executing task for agent 'agent4': -[2024-10-27 22:55:12,672] [DEBUG] [Engine]: Summarized agents' results: -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Could you please provide information on the latest trends in AI for a summary report?"}', name='communicate_to'), id='call_CVycaJpKw4PYBRi9QbfTHe6R', type='function')], function_call=None) - -[2024-10-27 22:55:12,673] [INFO] [Engine]: Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Could you please provide information on the latest trends in AI for a summary report?"}', name='communicate_to'), id='call_CVycaJpKw4PYBRi9QbfTHe6R', type='function')], function_call=None) - -[2024-10-27 22:55:12,674] [DEBUG] [EnginePlanner]: Updated progress: Starting the simulation. 
-Agents' Results Summary: -- agent2: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://www.google.com/search?q=latest+trends+in+AI"}', name='fetch_webpage'), id='call_IALKCoF9xx6uk4YBCXk8Co6M', type='function')], function_call=None) -- agent3: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://en.wikipedia.org/wiki/Artificial_intelligence"}', name='fetch_webpage'), id='call_HigGUIz5miADL4O3bNXPFE5V', type='function')], function_call=None) -- agent4: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"url":"https://twitter.com/search?q=%23AI%20%23Trends&src=typed_query"}', name='fetch_webpage'), id='call_QssF2paXKheyxbuFhUBZKHjJ', type='function')], function_call=None) - -Agents' Results Summary: -- agent1: Message(content=None, role='assistant', tool_calls=[ChatCompletionMessageToolCall(function=Function(arguments='{"target_agent_id":"agent2","message":"Could you please provide information on the latest trends in AI for a summary report?"}', name='communicate_to'), id='call_CVycaJpKw4PYBRi9QbfTHe6R', type='function')], function_call=None) - -[2024-10-27 22:55:13,052] [DEBUG] [EnginePlanner]: Received continuation decision: {'continue': False} -[2024-10-27 22:55:13,052] [INFO] [Engine]: EnginePlanner decided to terminate the simulation. -[2024-10-27 22:55:13,052] [INFO] [Engine]: Engine simulation loop completed. -[2024-10-27 22:55:13,053] [INFO] [Evaluator]: Task Completion Success Rate: 0.00% -[2024-10-27 22:55:13,053] [INFO] [Evaluator]: Total Token Consumption: 103 -[2024-10-27 22:55:13,053] [INFO] [Evaluator]: Average Tokens per Iteration: 51.5 -[2024-10-27 22:55:13,053] [INFO] [Engine]: Simulation completed. 
diff --git a/marble/result/__init__.py b/marble/result/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/marble/run_demo.sh b/marble/run_demo.sh index d50e238b..435cb5be 100755 --- a/marble/run_demo.sh +++ b/marble/run_demo.sh @@ -1,7 +1,7 @@ #!/bin/bash # Define the path to the configuration file -CONFIG_FILE="./configs/test_config" +CONFIG_FILE="./configs/test_config_3" # Execute the simulation engine with the specified configuration python main.py --config "$CONFIG_FILE" diff --git a/poetry.lock b/poetry.lock index 0273539c..dbd0f2d6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -150,13 +150,13 @@ files = [ [[package]] name = "anyio" -version = "4.5.1" +version = "4.6.2.post1" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.5.1-py3-none-any.whl", hash = "sha256:474fa88d0cc1cdcbb357b1e366a74b9d762c3fb5a0acb9d4292cc42db6f884ef"}, - {file = "anyio-4.5.1.tar.gz", hash = "sha256:d0a54cc24769e2755d34f40bcdf5b3ca64b545319bc629c6e3398397e655bc91"}, + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, ] [package.dependencies] @@ -227,50 +227,79 @@ files = [ [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = 
"attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] -name = "backcall" -version = "0.2.0" -description = "Specifications for callback functions passed in to an API" +name = "bcrypt" +version = "4.2.0" +description = "Modern password hashing for your software and your servers" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = 
"sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, - {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, + {file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"}, + {file = "bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"}, + {file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"}, + {file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"}, + {file = 
"bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"}, + {file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"}, + {file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"}, + {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"}, + {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"}, + {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"}, + {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"}, + {file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"}, ] +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + [[package]] name = "beartype" -version = "0.18.5" -description = "Unbearably fast runtime type checking in pure Python." +version = "0.19.0" +description = "Unbearably fast near-real-time hybrid runtime-static type-checking in pure Python." optional = false -python-versions = ">=3.8.0" +python-versions = ">=3.8" files = [ - {file = "beartype-0.18.5-py3-none-any.whl", hash = "sha256:5301a14f2a9a5540fe47ec6d34d758e9cd8331d36c4760fc7a5499ab86310089"}, - {file = "beartype-0.18.5.tar.gz", hash = "sha256:264ddc2f1da9ec94ff639141fbe33d22e12a9f75aa863b83b7046ffff1381927"}, + {file = "beartype-0.19.0-py3-none-any.whl", hash = "sha256:33b2694eda0daf052eb2aff623ed9a8a586703bbf0a90bbc475a83bbf427f699"}, + {file = "beartype-0.19.0.tar.gz", hash = "sha256:de42dfc1ba5c3710fde6c3002e3bd2cad236ed4d2aabe876345ab0b4234a6573"}, ] [package.extras] -all = ["typing-extensions (>=3.10.0.0)"] -dev = ["autoapi (>=0.9.0)", "coverage (>=5.5)", "equinox", "mypy (>=0.800)", "numpy", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pytest (>=4.0.0)", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"] +dev = ["autoapi (>=0.9.0)", "coverage (>=5.5)", "equinox", "jax[cpu]", "jaxtyping", "mypy (>=0.800)", "numba", "numpy", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"] doc-rtd = ["autoapi (>=0.9.0)", "pydata-sphinx-theme (<=0.7.2)", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)"] -test-tox = ["equinox", "mypy (>=0.800)", "numpy", "pandera", "pytest (>=4.0.0)", "sphinx", 
"typing-extensions (>=3.10.0.0)"] +test = ["coverage (>=5.5)", "equinox", "jax[cpu]", "jaxtyping", "mypy (>=0.800)", "numba", "numpy", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "sphinx", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"] +test-tox = ["equinox", "jax[cpu]", "jaxtyping", "mypy (>=0.800)", "numba", "numpy", "pandera", "pygments", "pyright (>=1.1.370)", "pytest (>=4.0.0)", "sphinx", "typing-extensions (>=3.10.0.0)"] test-tox-coverage = ["coverage (>=5.5)"] [[package]] @@ -321,63 +350,78 @@ files = [ [[package]] name = "cffi" -version = "1.16.0" +version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = 
"sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = 
"cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = 
"cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = 
"cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash 
= "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = 
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = 
"cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] [package.dependencies] @@ -550,35 +594,88 @@ traitlets = ">=4" [package.extras] test = ["pytest"] +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = 
"cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = 
"cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + [[package]] name = "debugpy" -version = "1.8.2" +version = "1.8.8" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:7ee2e1afbf44b138c005e4380097d92532e1001580853a7cb40ed84e0ef1c3d2"}, - {file = "debugpy-1.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f8c3f7c53130a070f0fc845a0f2cee8ed88d220d6b04595897b66605df1edd6"}, - {file = "debugpy-1.8.2-cp310-cp310-win32.whl", hash = "sha256:f179af1e1bd4c88b0b9f0fa153569b24f6b6f3de33f94703336363ae62f4bf47"}, - {file = "debugpy-1.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:0600faef1d0b8d0e85c816b8bb0cb90ed94fc611f308d5fde28cb8b3d2ff0fe3"}, - {file = "debugpy-1.8.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:8a13417ccd5978a642e91fb79b871baded925d4fadd4dfafec1928196292aa0a"}, - {file = "debugpy-1.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acdf39855f65c48ac9667b2801234fc64d46778021efac2de7e50907ab90c634"}, - {file = "debugpy-1.8.2-cp311-cp311-win32.whl", hash = 
"sha256:2cbd4d9a2fc5e7f583ff9bf11f3b7d78dfda8401e8bb6856ad1ed190be4281ad"}, - {file = "debugpy-1.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:d3408fddd76414034c02880e891ea434e9a9cf3a69842098ef92f6e809d09afa"}, - {file = "debugpy-1.8.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:5d3ccd39e4021f2eb86b8d748a96c766058b39443c1f18b2dc52c10ac2757835"}, - {file = "debugpy-1.8.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62658aefe289598680193ff655ff3940e2a601765259b123dc7f89c0239b8cd3"}, - {file = "debugpy-1.8.2-cp312-cp312-win32.whl", hash = "sha256:bd11fe35d6fd3431f1546d94121322c0ac572e1bfb1f6be0e9b8655fb4ea941e"}, - {file = "debugpy-1.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:15bc2f4b0f5e99bf86c162c91a74c0631dbd9cef3c6a1d1329c946586255e859"}, - {file = "debugpy-1.8.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:5a019d4574afedc6ead1daa22736c530712465c0c4cd44f820d803d937531b2d"}, - {file = "debugpy-1.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40f062d6877d2e45b112c0bbade9a17aac507445fd638922b1a5434df34aed02"}, - {file = "debugpy-1.8.2-cp38-cp38-win32.whl", hash = "sha256:c78ba1680f1015c0ca7115671fe347b28b446081dada3fedf54138f44e4ba031"}, - {file = "debugpy-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:cf327316ae0c0e7dd81eb92d24ba8b5e88bb4d1b585b5c0d32929274a66a5210"}, - {file = "debugpy-1.8.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:1523bc551e28e15147815d1397afc150ac99dbd3a8e64641d53425dba57b0ff9"}, - {file = "debugpy-1.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e24ccb0cd6f8bfaec68d577cb49e9c680621c336f347479b3fce060ba7c09ec1"}, - {file = "debugpy-1.8.2-cp39-cp39-win32.whl", hash = "sha256:7f8d57a98c5a486c5c7824bc0b9f2f11189d08d73635c326abef268f83950326"}, - {file = "debugpy-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:16c8dcab02617b75697a0a925a62943e26a0330da076e2a10437edd9f0bf3755"}, - {file = 
"debugpy-1.8.2-py2.py3-none-any.whl", hash = "sha256:16e16df3a98a35c63c3ab1e4d19be4cbc7fdda92d9ddc059294f18910928e0ca"}, - {file = "debugpy-1.8.2.zip", hash = "sha256:95378ed08ed2089221896b9b3a8d021e642c24edc8fef20e5d4342ca8be65c00"}, + {file = "debugpy-1.8.8-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:e59b1607c51b71545cb3496876544f7186a7a27c00b436a62f285603cc68d1c6"}, + {file = "debugpy-1.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6531d952b565b7cb2fbd1ef5df3d333cf160b44f37547a4e7cf73666aca5d8d"}, + {file = "debugpy-1.8.8-cp310-cp310-win32.whl", hash = "sha256:b01f4a5e5c5fb1d34f4ccba99a20ed01eabc45a4684f4948b5db17a319dfb23f"}, + {file = "debugpy-1.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:535f4fb1c024ddca5913bb0eb17880c8f24ba28aa2c225059db145ee557035e9"}, + {file = "debugpy-1.8.8-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:c399023146e40ae373753a58d1be0a98bf6397fadc737b97ad612886b53df318"}, + {file = "debugpy-1.8.8-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09cc7b162586ea2171eea055985da2702b0723f6f907a423c9b2da5996ad67ba"}, + {file = "debugpy-1.8.8-cp311-cp311-win32.whl", hash = "sha256:eea8821d998ebeb02f0625dd0d76839ddde8cbf8152ebbe289dd7acf2cdc6b98"}, + {file = "debugpy-1.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:d4483836da2a533f4b1454dffc9f668096ac0433de855f0c22cdce8c9f7e10c4"}, + {file = "debugpy-1.8.8-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:0cc94186340be87b9ac5a707184ec8f36547fb66636d1029ff4f1cc020e53996"}, + {file = "debugpy-1.8.8-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64674e95916e53c2e9540a056e5f489e0ad4872645399d778f7c598eacb7b7f9"}, + {file = "debugpy-1.8.8-cp312-cp312-win32.whl", hash = "sha256:5c6e885dbf12015aed73770f29dec7023cb310d0dc2ba8bfbeb5c8e43f80edc9"}, + {file = 
"debugpy-1.8.8-cp312-cp312-win_amd64.whl", hash = "sha256:19ffbd84e757a6ca0113574d1bf5a2298b3947320a3e9d7d8dc3377f02d9f864"}, + {file = "debugpy-1.8.8-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:705cd123a773d184860ed8dae99becd879dfec361098edbefb5fc0d3683eb804"}, + {file = "debugpy-1.8.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890fd16803f50aa9cb1a9b9b25b5ec321656dd6b78157c74283de241993d086f"}, + {file = "debugpy-1.8.8-cp313-cp313-win32.whl", hash = "sha256:90244598214bbe704aa47556ec591d2f9869ff9e042e301a2859c57106649add"}, + {file = "debugpy-1.8.8-cp313-cp313-win_amd64.whl", hash = "sha256:4b93e4832fd4a759a0c465c967214ed0c8a6e8914bced63a28ddb0dd8c5f078b"}, + {file = "debugpy-1.8.8-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:143ef07940aeb8e7316de48f5ed9447644da5203726fca378f3a6952a50a9eae"}, + {file = "debugpy-1.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f95651bdcbfd3b27a408869a53fbefcc2bcae13b694daee5f1365b1b83a00113"}, + {file = "debugpy-1.8.8-cp38-cp38-win32.whl", hash = "sha256:26b461123a030e82602a750fb24d7801776aa81cd78404e54ab60e8b5fecdad5"}, + {file = "debugpy-1.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:f3cbf1833e644a3100eadb6120f25be8a532035e8245584c4f7532937edc652a"}, + {file = "debugpy-1.8.8-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:53709d4ec586b525724819dc6af1a7703502f7e06f34ded7157f7b1f963bb854"}, + {file = "debugpy-1.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a9c013077a3a0000e83d97cf9cc9328d2b0bbb31f56b0e99ea3662d29d7a6a2"}, + {file = "debugpy-1.8.8-cp39-cp39-win32.whl", hash = "sha256:ffe94dd5e9a6739a75f0b85316dc185560db3e97afa6b215628d1b6a17561cb2"}, + {file = "debugpy-1.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5c0e5a38c7f9b481bf31277d2f74d2109292179081f11108e668195ef926c0f9"}, + {file = 
"debugpy-1.8.8-py2.py3-none-any.whl", hash = "sha256:ec684553aba5b4066d4de510859922419febc710df7bba04fe9e7ef3de15d34f"}, + {file = "debugpy-1.8.8.zip", hash = "sha256:e6355385db85cbd666be703a96ab7351bc9e6c61d694893206f8001e22aee091"}, ] [[package]] @@ -594,13 +691,13 @@ files = [ [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -616,13 +713,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.2.1" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, - {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] @@ -630,13 +727,13 @@ test = ["pytest (>=6)"] [[package]] name = "executing" -version = "2.0.1" +version = "2.1.0" description = "Get the currently executing AST node of a frame, and other information" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = 
"executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, - {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, + {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, + {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, ] [package.extras] @@ -672,115 +769,130 @@ sgmllib3k = "*" [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "frozenlist" -version = 
"1.4.1" +version = "1.5.0" description = "A list-like structure which implements collections.abc.MutableSequence" optional = false python-versions = ">=3.8" files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - 
{file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = 
"frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = 
"frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, - {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, - {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, - {file = 
"frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, - {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = 
"sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, - {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + 
{file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = 
"sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = 
"sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = "frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, ] [[package]] name = "fsspec" -version = "2024.9.0" +version = "2024.10.0" description = "File-system specification" optional = false python-versions = ">=3.8" files = [ - {file = "fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, - {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, + {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"}, + {file = "fsspec-2024.10.0.tar.gz", hash = 
"sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"}, ] [package.extras] @@ -870,13 +982,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "0.25.2" +version = "0.26.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.25.2-py3-none-any.whl", hash = "sha256:1897caf88ce7f97fe0110603d8f66ac264e3ba6accdf30cd66cc0fed5282ad25"}, - {file = "huggingface_hub-0.25.2.tar.gz", hash = "sha256:a1014ea111a5f40ccd23f7f7ba8ac46e20fa3b658ced1f86a00c75c06ec6423c"}, + {file = "huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46"}, + {file = "huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b"}, ] [package.dependencies] @@ -889,28 +1001,28 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] 
-dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] -inference = ["aiohttp", "minijinja (>=1.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] +inference = ["aiohttp"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", 
"types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] name = "identify" -version = "2.5.36" +version = "2.6.2" description = "File identification library for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "identify-2.5.36-py2.py3-none-any.whl", hash = "sha256:37d93f380f4de590500d9dba7db359d0d3da95ffe7f9de1753faa159e71e7dfa"}, - {file = "identify-2.5.36.tar.gz", hash = "sha256:e5e00f54165f9047fbebeb4a560f9acfb8af4c88232be60a488e9b68d122745d"}, + {file = "identify-2.6.2-py2.py3-none-any.whl", hash = "sha256:c097384259f49e372f4ea00a19719d95ae27dd5ff0fd77ad630aa891306b82f3"}, + {file = "identify-2.6.2.tar.gz", hash = "sha256:fab5c716c24d7a789775228823797296a2994b075fb6080ac83a102772a98cbd"}, ] [package.extras] @@ -932,22 +1044,26 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 [[package]] name = "importlib-metadata" -version = "8.0.0" +version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-8.0.0-py3-none-any.whl", hash = "sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f"}, - {file = "importlib_metadata-8.0.0.tar.gz", hash = "sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [package.dependencies] -zipp = ">=0.5" +zipp = ">=3.20" [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = 
["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "iniconfig" @@ -995,61 +1111,59 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio [[package]] name = "ipython" -version = "8.12.3" +version = "8.18.1" description = "IPython: Productive Interactive Computing" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "ipython-8.12.3-py3-none-any.whl", hash = "sha256:b0340d46a933d27c657b211a329d0be23793c36595acf9e6ef4164bc01a1804c"}, - {file = "ipython-8.12.3.tar.gz", hash = "sha256:3910c4b54543c2ad73d06579aa771041b7d5707b033bd488669b4cf544e3b363"}, + {file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"}, + {file = "ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27"}, ] [package.dependencies] -appnope = {version = "*", markers = "sys_platform == \"darwin\""} -backcall = "*" colorama = {version = "*", markers = "sys_platform == \"win32\""} decorator = "*" +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} jedi = ">=0.16" matplotlib-inline = "*" pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} -pickleshare = "*" -prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" +prompt-toolkit = ">=3.0.41,<3.1.0" pygments = ">=2.4.0" stack-data = "*" traitlets = ">=5" typing-extensions = {version = "*", markers = "python_version < \"3.10\""} [package.extras] -all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib 
(!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] +all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] black = ["black"] -doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] +doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] kernel = ["ipykernel"] nbconvert = ["nbconvert"] nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] -test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] +test = ["pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath", "trio"] [[package]] name = "jedi" -version = "0.19.1" +version = "0.19.2" description = "An autocompletion tool for Python that can be used for text editors." 
optional = false python-versions = ">=3.6" files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, ] [package.dependencies] -parso = ">=0.8.3,<0.9.0" +parso = ">=0.8.4,<0.9.0" [package.extras] docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] [[package]] name = "jinja2" @@ -1070,84 +1184,84 @@ i18n = ["Babel (>=2.7)"] [[package]] name = "jiter" -version = "0.6.1" +version = "0.7.1" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" files = [ - {file = "jiter-0.6.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d08510593cb57296851080018006dfc394070178d238b767b1879dc1013b106c"}, - {file = "jiter-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adef59d5e2394ebbad13b7ed5e0306cceb1df92e2de688824232a91588e77aa7"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3e02f7a27f2bcc15b7d455c9df05df8ffffcc596a2a541eeda9a3110326e7a3"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed69a7971d67b08f152c17c638f0e8c2aa207e9dd3a5fcd3cba294d39b5a8d2d"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2019d966e98f7c6df24b3b8363998575f47d26471bfb14aade37630fae836a1"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36c0b51a285b68311e207a76c385650322734c8717d16c2eb8af75c9d69506e7"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:220e0963b4fb507c525c8f58cde3da6b1be0bfddb7ffd6798fb8f2531226cdb1"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa25c7a9bf7875a141182b9c95aed487add635da01942ef7ca726e42a0c09058"}, - {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e90552109ca8ccd07f47ca99c8a1509ced93920d271bb81780a973279974c5ab"}, - {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:67723a011964971864e0b484b0ecfee6a14de1533cff7ffd71189e92103b38a8"}, - {file = "jiter-0.6.1-cp310-none-win32.whl", hash = "sha256:33af2b7d2bf310fdfec2da0177eab2fedab8679d1538d5b86a633ebfbbac4edd"}, - {file = "jiter-0.6.1-cp310-none-win_amd64.whl", hash = "sha256:7cea41c4c673353799906d940eee8f2d8fd1d9561d734aa921ae0f75cb9732f4"}, - {file = "jiter-0.6.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:b03c24e7da7e75b170c7b2b172d9c5e463aa4b5c95696a368d52c295b3f6847f"}, - {file = "jiter-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47fee1be677b25d0ef79d687e238dc6ac91a8e553e1a68d0839f38c69e0ee491"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0d2f6e01a8a0fb0eab6d0e469058dab2be46ff3139ed2d1543475b5a1d8e7"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b809e39e342c346df454b29bfcc7bca3d957f5d7b60e33dae42b0e5ec13e027"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9ac7c2f092f231f5620bef23ce2e530bd218fc046098747cc390b21b8738a7a"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e51a2d80d5fe0ffb10ed2c82b6004458be4a3f2b9c7d09ed85baa2fbf033f54b"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3343d4706a2b7140e8bd49b6c8b0a82abf9194b3f0f5925a78fc69359f8fc33c"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82521000d18c71e41c96960cb36e915a357bc83d63a8bed63154b89d95d05ad1"}, - {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3c843e7c1633470708a3987e8ce617ee2979ee18542d6eb25ae92861af3f1d62"}, - {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2e861658c3fe849efc39b06ebb98d042e4a4c51a8d7d1c3ddc3b1ea091d0784"}, - {file = "jiter-0.6.1-cp311-none-win32.whl", hash = "sha256:7d72fc86474862c9c6d1f87b921b70c362f2b7e8b2e3c798bb7d58e419a6bc0f"}, - {file = "jiter-0.6.1-cp311-none-win_amd64.whl", hash = "sha256:3e36a320634f33a07794bb15b8da995dccb94f944d298c8cfe2bd99b1b8a574a"}, - {file = "jiter-0.6.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1fad93654d5a7dcce0809aff66e883c98e2618b86656aeb2129db2cd6f26f867"}, - {file = "jiter-0.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:4e6e340e8cd92edab7f6a3a904dbbc8137e7f4b347c49a27da9814015cc0420c"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:691352e5653af84ed71763c3c427cff05e4d658c508172e01e9c956dfe004aba"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:defee3949313c1f5b55e18be45089970cdb936eb2a0063f5020c4185db1b63c9"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26d2bdd5da097e624081c6b5d416d3ee73e5b13f1703bcdadbb1881f0caa1933"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18aa9d1626b61c0734b973ed7088f8a3d690d0b7f5384a5270cd04f4d9f26c86"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3567c8228afa5ddcce950631c6b17397ed178003dc9ee7e567c4c4dcae9fa0"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c0507131c922defe3f04c527d6838932fcdfd69facebafd7d3574fa3395314"}, - {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:540fcb224d7dc1bcf82f90f2ffb652df96f2851c031adca3c8741cb91877143b"}, - {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2"}, - {file = "jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0"}, - {file = "jiter-0.6.1-cp312-none-win_amd64.whl", hash = "sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6"}, - {file = "jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31"}, - {file = "jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a"}, - {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71"}, - {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991"}, - {file = "jiter-0.6.1-cp313-none-win32.whl", hash = "sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4"}, - {file = "jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda"}, - {file = "jiter-0.6.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:31d8e00e1fb4c277df8ab6f31a671f509ebc791a80e5c61fdc6bc8696aaa297c"}, - {file = "jiter-0.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77c296d65003cd7ee5d7b0965f6acbe6cffaf9d1fa420ea751f60ef24e85fed5"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeeb0c0325ef96c12a48ea7e23e2e86fe4838e6e0a995f464cf4c79fa791ceeb"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash 
= "sha256:a31c6fcbe7d6c25d6f1cc6bb1cba576251d32795d09c09961174fe461a1fb5bd"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59e2b37f3b9401fc9e619f4d4badcab2e8643a721838bcf695c2318a0475ae42"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bae5ae4853cb9644144e9d0755854ce5108d470d31541d83f70ca7ecdc2d1637"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df588e9c830b72d8db1dd7d0175af6706b0904f682ea9b1ca8b46028e54d6e9"}, - {file = "jiter-0.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15f8395e835cf561c85c1adee72d899abf2733d9df72e9798e6d667c9b5c1f30"}, - {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a99d4e0b5fc3b05ea732d67eb2092fe894e95a90e6e413f2ea91387e228a307"}, - {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a311df1fa6be0ccd64c12abcd85458383d96e542531bafbfc0a16ff6feda588f"}, - {file = "jiter-0.6.1-cp38-none-win32.whl", hash = "sha256:81116a6c272a11347b199f0e16b6bd63f4c9d9b52bc108991397dd80d3c78aba"}, - {file = "jiter-0.6.1-cp38-none-win_amd64.whl", hash = "sha256:13f9084e3e871a7c0b6e710db54444088b1dd9fbefa54d449b630d5e73bb95d0"}, - {file = "jiter-0.6.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f1c53615fcfec3b11527c08d19cff6bc870da567ce4e57676c059a3102d3a082"}, - {file = "jiter-0.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f791b6a4da23238c17a81f44f5b55d08a420c5692c1fda84e301a4b036744eb1"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c97e90fec2da1d5f68ef121444c2c4fa72eabf3240829ad95cf6bbeca42a301"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3cbc1a66b4e41511209e97a2866898733c0110b7245791ac604117b7fb3fedb7"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e4e85f9e12cd8418ab10e1fcf0e335ae5bb3da26c4d13a0fd9e6a17a674783b6"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08be33db6dcc374c9cc19d3633af5e47961a7b10d4c61710bd39e48d52a35824"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:677be9550004f5e010d673d3b2a2b815a8ea07a71484a57d3f85dde7f14cf132"}, - {file = "jiter-0.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8bd065be46c2eecc328e419d6557bbc37844c88bb07b7a8d2d6c91c7c4dedc9"}, - {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bd95375ce3609ec079a97c5d165afdd25693302c071ca60c7ae1cf826eb32022"}, - {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db459ed22d0208940d87f614e1f0ea5a946d29a3cfef71f7e1aab59b6c6b2afb"}, - {file = "jiter-0.6.1-cp39-none-win32.whl", hash = "sha256:d71c962f0971347bd552940ab96aa42ceefcd51b88c4ced8a27398182efa8d80"}, - {file = "jiter-0.6.1-cp39-none-win_amd64.whl", hash = "sha256:d465db62d2d10b489b7e7a33027c4ae3a64374425d757e963f86df5b5f2e7fc5"}, - {file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"}, + {file = "jiter-0.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:262e96d06696b673fad6f257e6a0abb6e873dc22818ca0e0600f4a1189eb334f"}, + {file = "jiter-0.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be6de02939aac5be97eb437f45cfd279b1dc9de358b13ea6e040e63a3221c40d"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935f10b802bc1ce2b2f61843e498c7720aa7f4e4bb7797aa8121eab017293c3d"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9cd3cccccabf5064e4bb3099c87bf67db94f805c1e62d1aefd2b7476e90e0ee2"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:4aa919ebfc5f7b027cc368fe3964c0015e1963b92e1db382419dadb098a05192"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ae2d01e82c94491ce4d6f461a837f63b6c4e6dd5bb082553a70c509034ff3d4"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f9568cd66dbbdab67ae1b4c99f3f7da1228c5682d65913e3f5f95586b3cb9a9"}, + {file = "jiter-0.7.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ecbf4e20ec2c26512736284dc1a3f8ed79b6ca7188e3b99032757ad48db97dc"}, + {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1a0508fddc70ce00b872e463b387d49308ef02b0787992ca471c8d4ba1c0fa1"}, + {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f84c9996664c460f24213ff1e5881530abd8fafd82058d39af3682d5fd2d6316"}, + {file = "jiter-0.7.1-cp310-none-win32.whl", hash = "sha256:c915e1a1960976ba4dfe06551ea87063b2d5b4d30759012210099e712a414d9f"}, + {file = "jiter-0.7.1-cp310-none-win_amd64.whl", hash = "sha256:75bf3b7fdc5c0faa6ffffcf8028a1f974d126bac86d96490d1b51b3210aa0f3f"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ad04a23a91f3d10d69d6c87a5f4471b61c2c5cd6e112e85136594a02043f462c"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e47a554de88dff701226bb5722b7f1b6bccd0b98f1748459b7e56acac2707a5"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e44fff69c814a2e96a20b4ecee3e2365e9b15cf5fe4e00869d18396daa91dab"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df0a1d05081541b45743c965436f8b5a1048d6fd726e4a030113a2699a6046ea"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f22cf8f236a645cb6d8ffe2a64edb5d2b66fb148bf7c75eea0cb36d17014a7bc"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:da8589f50b728ea4bf22e0632eefa125c8aa9c38ed202a5ee6ca371f05eeb3ff"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f20de711224f2ca2dbb166a8d512f6ff48c9c38cc06b51f796520eb4722cc2ce"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a9803396032117b85ec8cbf008a54590644a062fedd0425cbdb95e4b2b60479"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d8bae77c82741032e9d89a4026479061aba6e646de3bf5f2fc1ae2bbd9d06e0"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dc9939e576bbc68c813fc82f6620353ed68c194c7bcf3d58dc822591ec12490"}, + {file = "jiter-0.7.1-cp311-none-win32.whl", hash = "sha256:f7605d24cd6fab156ec89e7924578e21604feee9c4f1e9da34d8b67f63e54892"}, + {file = "jiter-0.7.1-cp311-none-win_amd64.whl", hash = "sha256:f3ea649e7751a1a29ea5ecc03c4ada0a833846c59c6da75d747899f9b48b7282"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ad36a1155cbd92e7a084a568f7dc6023497df781adf2390c345dd77a120905ca"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7ba52e6aaed2dc5c81a3d9b5e4ab95b039c4592c66ac973879ba57c3506492bb"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7de0b6f6728b678540c7927587e23f715284596724be203af952418acb8a2d"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9463b62bd53c2fb85529c700c6a3beb2ee54fde8bef714b150601616dcb184a6"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:627164ec01d28af56e1f549da84caf0fe06da3880ebc7b7ee1ca15df106ae172"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25d0e5bf64e368b0aa9e0a559c3ab2f9b67e35fe7269e8a0d81f48bbd10e8963"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:c244261306f08f8008b3087059601997016549cb8bb23cf4317a4827f07b7d74"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ded4e4b75b68b843b7cea5cd7c55f738c20e1394c68c2cb10adb655526c5f1b"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:80dae4f1889b9d09e5f4de6b58c490d9c8ce7730e35e0b8643ab62b1538f095c"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5970cf8ec943b51bce7f4b98d2e1ed3ada170c2a789e2db3cb484486591a176a"}, + {file = "jiter-0.7.1-cp312-none-win32.whl", hash = "sha256:701d90220d6ecb3125d46853c8ca8a5bc158de8c49af60fd706475a49fee157e"}, + {file = "jiter-0.7.1-cp312-none-win_amd64.whl", hash = "sha256:7824c3ecf9ecf3321c37f4e4d4411aad49c666ee5bc2a937071bdd80917e4533"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:097676a37778ba3c80cb53f34abd6943ceb0848263c21bf423ae98b090f6c6ba"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3298af506d4271257c0a8f48668b0f47048d69351675dd8500f22420d4eec378"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12fd88cfe6067e2199964839c19bd2b422ca3fd792949b8f44bb8a4e7d21946a"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dacca921efcd21939123c8ea8883a54b9fa7f6545c8019ffcf4f762985b6d0c8"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de3674a5fe1f6713a746d25ad9c32cd32fadc824e64b9d6159b3b34fd9134143"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65df9dbae6d67e0788a05b4bad5706ad40f6f911e0137eb416b9eead6ba6f044"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ba9a358d59a0a55cccaa4957e6ae10b1a25ffdabda863c0343c51817610501d"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:576eb0f0c6207e9ede2b11ec01d9c2182973986514f9c60bc3b3b5d5798c8f50"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e550e29cdf3577d2c970a18f3959e6b8646fd60ef1b0507e5947dc73703b5627"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:81d968dbf3ce0db2e0e4dec6b0a0d5d94f846ee84caf779b07cab49f5325ae43"}, + {file = "jiter-0.7.1-cp313-none-win32.whl", hash = "sha256:f892e547e6e79a1506eb571a676cf2f480a4533675f834e9ae98de84f9b941ac"}, + {file = "jiter-0.7.1-cp313-none-win_amd64.whl", hash = "sha256:0302f0940b1455b2a7fb0409b8d5b31183db70d2b07fd177906d83bf941385d1"}, + {file = "jiter-0.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c65a3ce72b679958b79d556473f192a4dfc5895e8cc1030c9f4e434690906076"}, + {file = "jiter-0.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e80052d3db39f9bb8eb86d207a1be3d9ecee5e05fdec31380817f9609ad38e60"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a497859c4f3f7acd71c8bd89a6f9cf753ebacacf5e3e799138b8e1843084e3"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c1288bc22b9e36854a0536ba83666c3b1fb066b811019d7b682c9cf0269cdf9f"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b096ca72dd38ef35675e1d3b01785874315182243ef7aea9752cb62266ad516f"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dbbd52c50b605af13dbee1a08373c520e6fcc6b5d32f17738875847fea4e2cd"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af29c5c6eb2517e71ffa15c7ae9509fa5e833ec2a99319ac88cc271eca865519"}, + {file = "jiter-0.7.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f114a4df1e40c03c0efbf974b376ed57756a1141eb27d04baee0680c5af3d424"}, + {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:191fbaee7cf46a9dd9b817547bf556facde50f83199d07fc48ebeff4082f9df4"}, + {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e2b445e5ee627fb4ee6bbceeb486251e60a0c881a8e12398dfdff47c56f0723"}, + {file = "jiter-0.7.1-cp38-none-win32.whl", hash = "sha256:47ac4c3cf8135c83e64755b7276339b26cd3c7ddadf9e67306ace4832b283edf"}, + {file = "jiter-0.7.1-cp38-none-win_amd64.whl", hash = "sha256:60b49c245cd90cde4794f5c30f123ee06ccf42fb8730a019a2870cd005653ebd"}, + {file = "jiter-0.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8f212eeacc7203256f526f550d105d8efa24605828382cd7d296b703181ff11d"}, + {file = "jiter-0.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d9e247079d88c00e75e297e6cb3a18a039ebcd79fefc43be9ba4eb7fb43eb726"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0aacaa56360139c53dcf352992b0331f4057a0373bbffd43f64ba0c32d2d155"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc1b55314ca97dbb6c48d9144323896e9c1a25d41c65bcb9550b3e0c270ca560"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f281aae41b47e90deb70e7386558e877a8e62e1693e0086f37d015fa1c102289"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93c20d2730a84d43f7c0b6fb2579dc54335db742a59cf9776d0b80e99d587382"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81ccccd8069110e150613496deafa10da2f6ff322a707cbec2b0d52a87b9671"}, + {file = "jiter-0.7.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a7d5e85766eff4c9be481d77e2226b4c259999cb6862ccac5ef6621d3c8dcce"}, + {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f52ce5799df5b6975439ecb16b1e879d7655e1685b6e3758c9b1b97696313bfb"}, + {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:e0c91a0304373fdf97d56f88356a010bba442e6d995eb7773cbe32885b71cdd8"}, + {file = "jiter-0.7.1-cp39-none-win32.whl", hash = "sha256:5c08adf93e41ce2755970e8aa95262298afe2bf58897fb9653c47cd93c3c6cdc"}, + {file = "jiter-0.7.1-cp39-none-win_amd64.whl", hash = "sha256:6592f4067c74176e5f369228fb2995ed01400c9e8e1225fb73417183a5e635f0"}, + {file = "jiter-0.7.1.tar.gz", hash = "sha256:448cf4f74f7363c34cdef26214da527e8eeffd88ba06d0b80b485ad0667baf5d"}, ] [[package]] @@ -1163,13 +1277,13 @@ files = [ [[package]] name = "jsonschema" -version = "4.22.0" +version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, - {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, ] [package.dependencies] @@ -1180,17 +1294,17 @@ rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-specifications" -version = "2023.12.1" +version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = 
[ - {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, - {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, + {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, + {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, ] [package.dependencies] @@ -1198,13 +1312,13 @@ referencing = ">=0.31.0" [[package]] name = "jupyter-client" -version = "8.6.2" +version = "8.6.3" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, - {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, ] [package.dependencies] @@ -1267,13 +1381,13 @@ use = ["tensorflow", "tensorflow-hub", "tensorflow-text"] [[package]] name = "litellm" -version = "1.52.1" +version = "1.52.5" description = "Library to easily interface with LLM API providers" optional = false python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8" files = [ - {file = "litellm-1.52.1-py3-none-any.whl", hash = "sha256:a76133fc6d14a3157275d9ae850b9f18312ae93ae313092d6cf9e9d35f2c72f2"}, - {file = "litellm-1.52.1.tar.gz", hash = "sha256:750056e0329c5c742193b8f2104133da1e69b2fcc534827e18f7b536af56315c"}, + {file = 
"litellm-1.52.5-py3-none-any.whl", hash = "sha256:38c0f30a849b80c99cfc56f96c4c7563d5ced83f08fd7fc2129011ddc4414ac5"}, + {file = "litellm-1.52.5.tar.gz", hash = "sha256:9708c02983c7ed22fc18c96e167bf1c4ed9672de397d413e7957c216dfc911e6"}, ] [package.dependencies] @@ -1319,71 +1433,72 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] name = "markupsafe" -version = "2.1.5" +version = "3.0.2" description = "Safely add untrusted strings to HTML/XML markup." optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = 
"sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = 
"MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = 
"MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + 
{file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", 
hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] [[package]] @@ -1907,13 +2022,13 @@ files = [ [[package]] name = "openai" -version = "1.54.3" +version = "1.54.4" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = 
"openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"}, - {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"}, + {file = "openai-1.54.4-py3-none-any.whl", hash = "sha256:0d95cef99346bf9b6d7fbf57faf61a673924c3e34fa8af84c9ffe04660673a7e"}, + {file = "openai-1.54.4.tar.gz", hash = "sha256:50f3656e45401c54e973fa05dc29f3f0b0d19348d685b2f7ddb4d92bf7b1b6bf"}, ] [package.dependencies] @@ -1931,15 +2046,36 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] +[[package]] +name = "paramiko" +version = "3.5.0" +description = "SSH2 protocol library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "paramiko-3.5.0-py3-none-any.whl", hash = "sha256:1fedf06b085359051cd7d0d270cebe19e755a8a921cc2ddbfa647fb0cd7d68f9"}, + {file = "paramiko-3.5.0.tar.gz", hash = "sha256:ad11e540da4f55cedda52931f1a3f812a8238a7af7f62a60de538cd80bb28124"}, +] + +[package.dependencies] +bcrypt = ">=3.2" +cryptography = ">=3.3" +pynacl = ">=1.5" + +[package.extras] +all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] +invoke = ["invoke (>=2.0)"] + [[package]] name = "parso" version = 
"0.8.4" @@ -1969,17 +2105,6 @@ files = [ [package.dependencies] ptyprocess = ">=0.5" -[[package]] -name = "pickleshare" -version = "0.7.5" -description = "Tiny 'shelve'-like database with concurrency support" -optional = false -python-versions = "*" -files = [ - {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, - {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, -] - [[package]] name = "pillow" version = "11.0.0" @@ -2074,19 +2199,19 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" @@ -2123,13 +2248,13 @@ virtualenv = ">=20.10.0" [[package]] name = 
"prompt-toolkit" -version = "3.0.47" +version = "3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, ] [package.dependencies] @@ -2244,32 +2369,109 @@ files = [ [[package]] name = "psutil" -version = "6.0.0" +version = "6.1.0" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, - {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, - {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = 
"sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, - {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, - {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, - {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, - {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, - {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, - {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, - {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, + {file = "psutil-6.1.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0"}, + {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942"}, + {file = "psutil-6.1.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = 
"sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047"}, + {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76"}, + {file = "psutil-6.1.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc"}, + {file = "psutil-6.1.0-cp27-none-win32.whl", hash = "sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e"}, + {file = "psutil-6.1.0-cp27-none-win_amd64.whl", hash = "sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"}, + {file = "psutil-6.1.0-cp36-cp36m-win32.whl", hash = "sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca"}, + {file = "psutil-6.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747"}, + {file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"}, + {file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = 
"sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"}, + {file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"}, ] [package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] +dev = ["black", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest-cov", "requests", "rstcheck", "ruff", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "wheel"] +test = ["pytest", "pytest-xdist", "setuptools"] + +[[package]] +name = "psycopg2-binary" +version = "2.9.10" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:0ea8e3d0ae83564f2fc554955d327fa081d065c8ca5cc6d2abb643e2c9c1200f"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:3e9c76f0ac6f92ecfc79516a8034a544926430f7b080ec5a0537bca389ee0906"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ad26b467a405c798aaa1458ba09d7e2b6e5f96b1ce0ac15d82fd9f95dc38a92"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:270934a475a0e4b6925b5f804e3809dd5f90f8613621d062848dd82f9cd62007"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48b338f08d93e7be4ab2b5f1dbe69dc5e9ef07170fe1f86514422076d9c010d0"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4152f8f76d2023aac16285576a9ecd2b11a9895373a1f10fd9db54b3ff06b4"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:32581b3020c72d7a421009ee1c6bf4a131ef5f0a968fab2e2de0c9d2bb4577f1"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:2ce3e21dc3437b1d960521eca599d57408a695a0d3c26797ea0f72e834c7ffe5"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e984839e75e0b60cfe75e351db53d6db750b00de45644c5d1f7ee5d1f34a1ce5"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c4745a90b78e51d9ba06e2088a2fe0c693ae19cc8cb051ccda44e8df8a6eb53"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-win32.whl", hash = "sha256:e5720a5d25e3b99cd0dc5c8a440570469ff82659bb09431c1439b92caf184d3b"}, + {file = "psycopg2_binary-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:3c18f74eb4386bf35e92ab2354a12c17e5eb4d9798e4c0ad3a00783eae7cd9f1"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:04392983d0bb89a8717772a193cfaac58871321e3ec69514e1c4e0d4957b5aff"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:1a6784f0ce3fec4edc64e985865c17778514325074adf5ad8f80636cd029ef7c"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f86c56eeb91dc3135b3fd8a95dc7ae14c538a2f3ad77a19645cf55bab1799c"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b3d2491d4d78b6b14f76881905c7a8a8abcf974aad4a8a0b065273a0ed7a2cb"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2286791ececda3a723d1910441c793be44625d86d1a4e79942751197f4d30341"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:512d29bb12608891e349af6a0cccedce51677725a921c07dba6342beaf576f9a"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:5a507320c58903967ef7384355a4da7ff3f28132d679aeb23572753cbf2ec10b"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d4fa1079cab9018f4d0bd2db307beaa612b0d13ba73b5c6304b9fe2fb441ff7"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:851485a42dbb0bdc1edcdabdb8557c09c9655dfa2ca0460ff210522e073e319e"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:35958ec9e46432d9076286dda67942ed6d968b9c3a6a2fd62b48939d1d78bf68"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-win32.whl", hash = "sha256:ecced182e935529727401b24d76634a357c71c9275b356efafd8a2a91ec07392"}, + {file = "psycopg2_binary-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:ee0e8c683a7ff25d23b55b11161c2663d4b099770f6085ff0a20d4505778d6b4"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64"}, + {file = "psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:26540d4a9a4e2b096f1ff9cce51253d0504dca5a85872c7f7be23be5a53eb18d"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e217ce4d37667df0bc1c397fdcd8de5e81018ef305aed9415c3b093faaeb10fb"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:245159e7ab20a71d989da00f280ca57da7641fa2cdcf71749c193cea540a74f7"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c4ded1a24b20021ebe677b7b08ad10bf09aac197d6943bfe6fec70ac4e4690d"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3abb691ff9e57d4a93355f60d4f4c1dd2d68326c968e7db17ea96df3c023ef73"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8608c078134f0b3cbd9f89b34bd60a943b23fd33cc5f065e8d5f840061bd0673"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:230eeae2d71594103cd5b93fd29d1ace6420d0b86f4778739cb1a5a32f607d1f"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"}, + {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:056470c3dc57904bbf63d6f534988bafc4e970ffd50f6271fc4ee7daad9498a5"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aa0e31fa4bb82578f3a6c74a73c273367727de397a7a0f07bd83cbea696baa"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8de718c0e1c4b982a54b41779667242bc630b2197948405b7bd8ce16bcecac92"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5c370b1e4975df846b0277b4deba86419ca77dbc25047f535b0bb03d1a544d44"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ffe8ed017e4ed70f68b7b371d84b7d4a790368db9203dfc2d222febd3a9c8863"}, + {file = "psycopg2_binary-2.9.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:8aecc5e80c63f7459a1a2ab2c64df952051df196294d9f739933a9f6687e86b3"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:7a813c8bdbaaaab1f078014b9b0b13f5de757e2b5d9be6403639b298a04d218b"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00924255d7fc916ef66e4bf22f354a940c67179ad3fd7067d7a0a9c84d2fbfc"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7559bce4b505762d737172556a4e6ea8a9998ecac1e39b5233465093e8cee697"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8b58f0a96e7a1e341fc894f62c1177a7c83febebb5ff9123b579418fdc8a481"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b269105e59ac96aba877c1707c600ae55711d9dcd3fc4b5012e4af68e30c648"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:79625966e176dc97ddabc142351e0409e28acf4660b88d1cf6adb876d20c490d"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8aabf1c1a04584c168984ac678a668094d831f152859d06e055288fa515e4d30"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:19721ac03892001ee8fdd11507e6a2e01f4e37014def96379411ca99d78aeb2c"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7f5d859928e635fa3ce3477704acee0f667b3a3d3e4bb109f2b18d4005f38287"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-win32.whl", hash = "sha256:3216ccf953b3f267691c90c6fe742e45d890d8272326b4a8b20850a03d05b7b8"}, + {file = "psycopg2_binary-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:30e34c4e97964805f715206c7b789d54a78b70f3ff19fbe590104b71c45600e5"}, +] [[package]] name = "ptyprocess" @@ -2284,13 +2486,13 @@ files = [ [[package]] name = "pure-eval" -version = "0.2.2" +version = "0.2.3" description = "Safely evaluate 
AST nodes without side effects" optional = false python-versions = "*" files = [ - {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, - {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, ] [package.extras] @@ -2442,6 +2644,47 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pymysql" +version = "1.1.1" +description = "Pure Python MySQL Driver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyMySQL-1.1.1-py3-none-any.whl", hash = "sha256:4de15da4c61dc132f4fb9ab763063e693d521a80fd0e87943b9a453dd4c19d6c"}, + {file = "pymysql-1.1.1.tar.gz", hash = "sha256:e127611aaf2b417403c60bf4dc570124aeb4a57f5f37b8e95ae399a42f904cd0"}, +] + +[package.extras] +ed25519 = ["PyNaCl (>=1.4.0)"] +rsa = ["cryptography"] + +[[package]] +name = "pynacl" +version = "1.5.0" +description = "Python binding to the Networking and Cryptography (NaCl) library" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] + +[package.dependencies] +cffi = ">=1.4.1" + +[package.extras] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] + [[package]] name = "pypdf2" version = "3.0.1" @@ -2533,182 +2776,209 @@ cli = ["click (>=5.0)"] [[package]] name = "pywin32" -version = "306" +version = "308" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = 
"sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = 
"sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, ] [[package]] name = "pyyaml" -version = "6.0.1" +version = "6.0.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - 
{file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - 
{file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = 
"sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = 
"sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = 
"PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = 
"PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] name = "pyzmq" -version = "26.0.3" +version = "26.2.0" description = "Python bindings for 0MQ" optional = false python-versions = ">=3.7" files = [ - {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:44dd6fc3034f1eaa72ece33588867df9e006a7303725a12d64c3dff92330f625"}, - {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acb704195a71ac5ea5ecf2811c9ee19ecdc62b91878528302dd0be1b9451cc90"}, - {file = 
"pyzmq-26.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dbb9c997932473a27afa93954bb77a9f9b786b4ccf718d903f35da3232317de"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bcb34f869d431799c3ee7d516554797f7760cb2198ecaa89c3f176f72d062be"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ece17ec5f20d7d9b442e5174ae9f020365d01ba7c112205a4d59cf19dc38ee"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ba6e5e6588e49139a0979d03a7deb9c734bde647b9a8808f26acf9c547cab1bf"}, - {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3bf8b000a4e2967e6dfdd8656cd0757d18c7e5ce3d16339e550bd462f4857e59"}, - {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2136f64fbb86451dbbf70223635a468272dd20075f988a102bf8a3f194a411dc"}, - {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e8918973fbd34e7814f59143c5f600ecd38b8038161239fd1a3d33d5817a38b8"}, - {file = "pyzmq-26.0.3-cp310-cp310-win32.whl", hash = "sha256:0aaf982e68a7ac284377d051c742610220fd06d330dcd4c4dbb4cdd77c22a537"}, - {file = "pyzmq-26.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f1a9b7d00fdf60b4039f4455afd031fe85ee8305b019334b72dcf73c567edc47"}, - {file = "pyzmq-26.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:80b12f25d805a919d53efc0a5ad7c0c0326f13b4eae981a5d7b7cc343318ebb7"}, - {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:a72a84570f84c374b4c287183debc776dc319d3e8ce6b6a0041ce2e400de3f32"}, - {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ca684ee649b55fd8f378127ac8462fb6c85f251c2fb027eb3c887e8ee347bcd"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e222562dc0f38571c8b1ffdae9d7adb866363134299264a1958d077800b193b7"}, - {file = 
"pyzmq-26.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f17cde1db0754c35a91ac00b22b25c11da6eec5746431d6e5092f0cd31a3fea9"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7c0c0b3244bb2275abe255d4a30c050d541c6cb18b870975553f1fb6f37527"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ac97a21de3712afe6a6c071abfad40a6224fd14fa6ff0ff8d0c6e6cd4e2f807a"}, - {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:88b88282e55fa39dd556d7fc04160bcf39dea015f78e0cecec8ff4f06c1fc2b5"}, - {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:72b67f966b57dbd18dcc7efbc1c7fc9f5f983e572db1877081f075004614fcdd"}, - {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4b6cecbbf3b7380f3b61de3a7b93cb721125dc125c854c14ddc91225ba52f83"}, - {file = "pyzmq-26.0.3-cp311-cp311-win32.whl", hash = "sha256:eed56b6a39216d31ff8cd2f1d048b5bf1700e4b32a01b14379c3b6dde9ce3aa3"}, - {file = "pyzmq-26.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:3191d312c73e3cfd0f0afdf51df8405aafeb0bad71e7ed8f68b24b63c4f36500"}, - {file = "pyzmq-26.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:b6907da3017ef55139cf0e417c5123a84c7332520e73a6902ff1f79046cd3b94"}, - {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:068ca17214038ae986d68f4a7021f97e187ed278ab6dccb79f837d765a54d753"}, - {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7821d44fe07335bea256b9f1f41474a642ca55fa671dfd9f00af8d68a920c2d4"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb438a26d87c123bb318e5f2b3d86a36060b01f22fbdffd8cf247d52f7c9a2b"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69ea9d6d9baa25a4dc9cef5e2b77b8537827b122214f210dd925132e34ae9b12"}, - {file = 
"pyzmq-26.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7daa3e1369355766dea11f1d8ef829905c3b9da886ea3152788dc25ee6079e02"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6ca7a9a06b52d0e38ccf6bca1aeff7be178917893f3883f37b75589d42c4ac20"}, - {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1b7d0e124948daa4d9686d421ef5087c0516bc6179fdcf8828b8444f8e461a77"}, - {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e746524418b70f38550f2190eeee834db8850088c834d4c8406fbb9bc1ae10b2"}, - {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6b3146f9ae6af82c47a5282ac8803523d381b3b21caeae0327ed2f7ecb718798"}, - {file = "pyzmq-26.0.3-cp312-cp312-win32.whl", hash = "sha256:2b291d1230845871c00c8462c50565a9cd6026fe1228e77ca934470bb7d70ea0"}, - {file = "pyzmq-26.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:926838a535c2c1ea21c903f909a9a54e675c2126728c21381a94ddf37c3cbddf"}, - {file = "pyzmq-26.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:5bf6c237f8c681dfb91b17f8435b2735951f0d1fad10cc5dfd96db110243370b"}, - {file = "pyzmq-26.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c0991f5a96a8e620f7691e61178cd8f457b49e17b7d9cfa2067e2a0a89fc1d5"}, - {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dbf012d8fcb9f2cf0643b65df3b355fdd74fc0035d70bb5c845e9e30a3a4654b"}, - {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:01fbfbeb8249a68d257f601deb50c70c929dc2dfe683b754659569e502fbd3aa"}, - {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c8eb19abe87029c18f226d42b8a2c9efdd139d08f8bf6e085dd9075446db450"}, - {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5344b896e79800af86ad643408ca9aa303a017f6ebff8cee5a3163c1e9aec987"}, - {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = 
"sha256:204e0f176fd1d067671157d049466869b3ae1fc51e354708b0dc41cf94e23a3a"}, - {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a42db008d58530efa3b881eeee4991146de0b790e095f7ae43ba5cc612decbc5"}, - {file = "pyzmq-26.0.3-cp37-cp37m-win32.whl", hash = "sha256:8d7a498671ca87e32b54cb47c82a92b40130a26c5197d392720a1bce1b3c77cf"}, - {file = "pyzmq-26.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:3b4032a96410bdc760061b14ed6a33613ffb7f702181ba999df5d16fb96ba16a"}, - {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2cc4e280098c1b192c42a849de8de2c8e0f3a84086a76ec5b07bfee29bda7d18"}, - {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bde86a2ed3ce587fa2b207424ce15b9a83a9fa14422dcc1c5356a13aed3df9d"}, - {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:34106f68e20e6ff253c9f596ea50397dbd8699828d55e8fa18bd4323d8d966e6"}, - {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ebbbd0e728af5db9b04e56389e2299a57ea8b9dd15c9759153ee2455b32be6ad"}, - {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6b1d1c631e5940cac5a0b22c5379c86e8df6a4ec277c7a856b714021ab6cfad"}, - {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e891ce81edd463b3b4c3b885c5603c00141151dd9c6936d98a680c8c72fe5c67"}, - {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9b273ecfbc590a1b98f014ae41e5cf723932f3b53ba9367cfb676f838038b32c"}, - {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b32bff85fb02a75ea0b68f21e2412255b5731f3f389ed9aecc13a6752f58ac97"}, - {file = "pyzmq-26.0.3-cp38-cp38-win32.whl", hash = "sha256:f6c21c00478a7bea93caaaef9e7629145d4153b15a8653e8bb4609d4bc70dbfc"}, - {file = "pyzmq-26.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:3401613148d93ef0fd9aabdbddb212de3db7a4475367f49f590c837355343972"}, - {file = 
"pyzmq-26.0.3-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:2ed8357f4c6e0daa4f3baf31832df8a33334e0fe5b020a61bc8b345a3db7a606"}, - {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1c8f2a2ca45292084c75bb6d3a25545cff0ed931ed228d3a1810ae3758f975f"}, - {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b63731993cdddcc8e087c64e9cf003f909262b359110070183d7f3025d1c56b5"}, - {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b3cd31f859b662ac5d7f4226ec7d8bd60384fa037fc02aee6ff0b53ba29a3ba8"}, - {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:115f8359402fa527cf47708d6f8a0f8234f0e9ca0cab7c18c9c189c194dbf620"}, - {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:715bdf952b9533ba13dfcf1f431a8f49e63cecc31d91d007bc1deb914f47d0e4"}, - {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e1258c639e00bf5e8a522fec6c3eaa3e30cf1c23a2f21a586be7e04d50c9acab"}, - {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15c59e780be8f30a60816a9adab900c12a58d79c1ac742b4a8df044ab2a6d920"}, - {file = "pyzmq-26.0.3-cp39-cp39-win32.whl", hash = "sha256:d0cdde3c78d8ab5b46595054e5def32a755fc028685add5ddc7403e9f6de9879"}, - {file = "pyzmq-26.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:ce828058d482ef860746bf532822842e0ff484e27f540ef5c813d516dd8896d2"}, - {file = "pyzmq-26.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:788f15721c64109cf720791714dc14afd0f449d63f3a5487724f024345067381"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c18645ef6294d99b256806e34653e86236eb266278c8ec8112622b61db255de"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e6bc96ebe49604df3ec2c6389cc3876cabe475e6bfc84ced1bf4e630662cb35"}, - {file = 
"pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:971e8990c5cc4ddcff26e149398fc7b0f6a042306e82500f5e8db3b10ce69f84"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8416c23161abd94cc7da80c734ad7c9f5dbebdadfdaa77dad78244457448223"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:082a2988364b60bb5de809373098361cf1dbb239623e39e46cb18bc035ed9c0c"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d57dfbf9737763b3a60d26e6800e02e04284926329aee8fb01049635e957fe81"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:77a85dca4c2430ac04dc2a2185c2deb3858a34fe7f403d0a946fa56970cf60a1"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4c82a6d952a1d555bf4be42b6532927d2a5686dd3c3e280e5f63225ab47ac1f5"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4496b1282c70c442809fc1b151977c3d967bfb33e4e17cedbf226d97de18f709"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e4946d6bdb7ba972dfda282f9127e5756d4f299028b1566d1245fa0d438847e6"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:03c0ae165e700364b266876d712acb1ac02693acd920afa67da2ebb91a0b3c09"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3e3070e680f79887d60feeda051a58d0ac36622e1759f305a41059eff62c6da7"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6ca08b840fe95d1c2bd9ab92dac5685f949fc6f9ae820ec16193e5ddf603c3b2"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76654e9dbfb835b3518f9938e565c7806976c07b37c33526b574cc1a1050480"}, - {file = 
"pyzmq-26.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:871587bdadd1075b112e697173e946a07d722459d20716ceb3d1bd6c64bd08ce"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d0a2d1bd63a4ad79483049b26514e70fa618ce6115220da9efdff63688808b17"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0270b49b6847f0d106d64b5086e9ad5dc8a902413b5dbbb15d12b60f9c1747a4"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:703c60b9910488d3d0954ca585c34f541e506a091a41930e663a098d3b794c67"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74423631b6be371edfbf7eabb02ab995c2563fee60a80a30829176842e71722a"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4adfbb5451196842a88fda3612e2c0414134874bffb1c2ce83ab4242ec9e027d"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3516119f4f9b8671083a70b6afaa0a070f5683e431ab3dc26e9215620d7ca1ad"}, - {file = "pyzmq-26.0.3.tar.gz", hash = "sha256:dba7d9f2e047dfa2bca3b01f4f84aa5246725203d6284e3790f2ca15fba6b40a"}, + {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"}, + {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"}, + {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"}, 
+ {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"}, + {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"}, + {file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"}, + {file = "pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"}, + {file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"}, + {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"}, + {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"}, + {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"}, + {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"}, + {file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"}, + {file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"}, + {file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"}, + {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"}, + {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"}, + {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"}, + {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"}, 
+ {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"}, + {file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"}, + {file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"}, + {file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"}, + {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"}, + {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"}, + {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"}, + {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"}, + {file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = 
"sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"}, + {file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"}, + {file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"}, + {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"}, + {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"}, + {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"}, + {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"}, + {file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"}, + {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"}, + {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"}, + {file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"}, + {file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"}, + {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"}, + {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"}, + {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"}, + {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"}, + {file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"}, + {file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"}, + {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"}, + {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"}, + {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"}, + {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"}, + {file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"}, + {file = 
"pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"}, + {file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"}, + {file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"}, + {file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"}, + {file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"}, + {file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"}, + {file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"}, ] [package.dependencies] @@ -2731,105 +3001,105 @@ rpds-py = ">=0.7.0" [[package]] name = "regex" -version = "2024.9.11" +version = "2024.11.6" description = "Alternative regular expression 
module, to replace re." optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, - {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, - {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, - {file = 
"regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, - {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, - {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, - {file = 
"regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, - {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, - {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, - 
{file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, - {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, - {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, - {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, - {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, - {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, - {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, - {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = 
"regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = 
"sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = 
"regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = 
"regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + 
{file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] [[package]] @@ -2874,110 +3144,101 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.18.1" +version = "0.21.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"}, - {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"}, - {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"}, - {file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, - {file = 
"rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, - {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, - {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, - {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, - {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, - {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, - {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, - {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, - {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, - 
{file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, - {file = 
"rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, - {file = 
"rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, - {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, + {file = "rpds_py-0.21.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590"}, + {file = "rpds_py-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad116dda078d0bc4886cb7840e19811562acdc7a8e296ea6ec37e70326c1b41c"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:808f1ac7cf3b44f81c9475475ceb221f982ef548e44e024ad5f9e7060649540e"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de552f4a1916e520f2703ec474d2b4d3f86d41f353e7680b597512ffe7eac5d0"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efec946f331349dfc4ae9d0e034c263ddde19414fe5128580f512619abed05f1"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b80b4690bbff51a034bfde9c9f6bf9357f0a8c61f548942b80f7b66356508bf5"}, + {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:085ed25baac88953d4283e5b5bd094b155075bb40d07c29c4f073e10623f9f2e"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:daa8efac2a1273eed2354397a51216ae1e198ecbce9036fba4e7610b308b6153"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:95a5bad1ac8a5c77b4e658671642e4af3707f095d2b78a1fdd08af0dfb647624"}, + {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:3e53861b29a13d5b70116ea4230b5f0f3547b2c222c5daa090eb7c9c82d7f664"}, + {file = "rpds_py-0.21.0-cp310-none-win32.whl", hash = "sha256:ea3a6ac4d74820c98fcc9da4a57847ad2cc36475a8bd9683f32ab6d47a2bd682"}, + {file = "rpds_py-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:b8f107395f2f1d151181880b69a2869c69e87ec079c49c0016ab96860b6acbe5"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5555db3e618a77034954b9dc547eae94166391a98eb867905ec8fcbce1308d95"}, + {file = "rpds_py-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97ef67d9bbc3e15584c2f3c74bcf064af36336c10d2e21a2131e123ce0f924c9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ab2c2a26d2f69cdf833174f4d9d86118edc781ad9a8fa13970b527bf8236027"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4e8921a259f54bfbc755c5bbd60c82bb2339ae0324163f32868f63f0ebb873d9"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a7ff941004d74d55a47f916afc38494bd1cfd4b53c482b77c03147c91ac0ac3"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5145282a7cd2ac16ea0dc46b82167754d5e103a05614b724457cffe614f25bd8"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de609a6f1b682f70bb7163da745ee815d8f230d97276db049ab447767466a09d"}, + {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40c91c6e34cf016fa8e6b59d75e3dbe354830777fcfd74c58b279dceb7975b75"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d2132377f9deef0c4db89e65e8bb28644ff75a18df5293e132a8d67748397b9f"}, + {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0a9e0759e7be10109645a9fddaaad0619d58c9bf30a3f248a2ea57a7c417173a"}, + {file = 
"rpds_py-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e20da3957bdf7824afdd4b6eeb29510e83e026473e04952dca565170cd1ecc8"}, + {file = "rpds_py-0.21.0-cp311-none-win32.whl", hash = "sha256:f71009b0d5e94c0e86533c0b27ed7cacc1239cb51c178fd239c3cfefefb0400a"}, + {file = "rpds_py-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:e168afe6bf6ab7ab46c8c375606298784ecbe3ba31c0980b7dcbb9631dcba97e"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:30b912c965b2aa76ba5168fd610087bad7fcde47f0a8367ee8f1876086ee6d1d"}, + {file = "rpds_py-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca9989d5d9b1b300bc18e1801c67b9f6d2c66b8fd9621b36072ed1df2c977f72"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f54e7106f0001244a5f4cf810ba8d3f9c542e2730821b16e969d6887b664266"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fed5dfefdf384d6fe975cc026886aece4f292feaf69d0eeb716cfd3c5a4dd8be"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590ef88db231c9c1eece44dcfefd7515d8bf0d986d64d0caf06a81998a9e8cab"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f983e4c2f603c95dde63df633eec42955508eefd8d0f0e6d236d31a044c882d7"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b229ce052ddf1a01c67d68166c19cb004fb3612424921b81c46e7ea7ccf7c3bf"}, + {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ebf64e281a06c904a7636781d2e973d1f0926a5b8b480ac658dc0f556e7779f4"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:998a8080c4495e4f72132f3d66ff91f5997d799e86cec6ee05342f8f3cda7dca"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:98486337f7b4f3c324ab402e83453e25bb844f44418c066623db88e4c56b7c7b"}, + {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a78d8b634c9df7f8d175451cfeac3810a702ccb85f98ec95797fa98b942cea11"}, + {file = "rpds_py-0.21.0-cp312-none-win32.whl", hash = "sha256:a58ce66847711c4aa2ecfcfaff04cb0327f907fead8945ffc47d9407f41ff952"}, + {file = "rpds_py-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:e860f065cc4ea6f256d6f411aba4b1251255366e48e972f8a347cf88077b24fd"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ee4eafd77cc98d355a0d02f263efc0d3ae3ce4a7c24740010a8b4012bbb24937"}, + {file = "rpds_py-0.21.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:688c93b77e468d72579351a84b95f976bd7b3e84aa6686be6497045ba84be560"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c38dbf31c57032667dd5a2f0568ccde66e868e8f78d5a0d27dcc56d70f3fcd3b"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d6129137f43f7fa02d41542ffff4871d4aefa724a5fe38e2c31a4e0fd343fb0"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520ed8b99b0bf86a176271f6fe23024323862ac674b1ce5b02a72bfeff3fff44"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaeb25ccfb9b9014a10eaf70904ebf3f79faaa8e60e99e19eef9f478651b9b74"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af04ac89c738e0f0f1b913918024c3eab6e3ace989518ea838807177d38a2e94"}, + {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9b76e2afd585803c53c5b29e992ecd183f68285b62fe2668383a18e74abe7a3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5afb5efde74c54724e1a01118c6e5c15e54e642c42a1ba588ab1f03544ac8c7a"}, + {file = 
"rpds_py-0.21.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:52c041802a6efa625ea18027a0723676a778869481d16803481ef6cc02ea8cb3"}, + {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee1e4fc267b437bb89990b2f2abf6c25765b89b72dd4a11e21934df449e0c976"}, + {file = "rpds_py-0.21.0-cp313-none-win32.whl", hash = "sha256:0c025820b78817db6a76413fff6866790786c38f95ea3f3d3c93dbb73b632202"}, + {file = "rpds_py-0.21.0-cp313-none-win_amd64.whl", hash = "sha256:320c808df533695326610a1b6a0a6e98f033e49de55d7dc36a13c8a30cfa756e"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2c51d99c30091f72a3c5d126fad26236c3f75716b8b5e5cf8effb18889ced928"}, + {file = "rpds_py-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbd7504a10b0955ea287114f003b7ad62330c9e65ba012c6223dba646f6ffd05"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dcc4949be728ede49e6244eabd04064336012b37f5c2200e8ec8eb2988b209c"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f414da5c51bf350e4b7960644617c130140423882305f7574b6cf65a3081cecb"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9afe42102b40007f588666bc7de82451e10c6788f6f70984629db193849dced1"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b929c2bb6e29ab31f12a1117c39f7e6d6450419ab7464a4ea9b0b417174f044"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8404b3717da03cbf773a1d275d01fec84ea007754ed380f63dfc24fb76ce4592"}, + {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e12bb09678f38b7597b8346983d2323a6482dcd59e423d9448108c1be37cac9d"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58a0e345be4b18e6b8501d3b0aa540dad90caeed814c515e5206bb2ec26736fd"}, + {file = 
"rpds_py-0.21.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c3761f62fcfccf0864cc4665b6e7c3f0c626f0380b41b8bd1ce322103fa3ef87"}, + {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c2b2f71c6ad6c2e4fc9ed9401080badd1469fa9889657ec3abea42a3d6b2e1ed"}, + {file = "rpds_py-0.21.0-cp39-none-win32.whl", hash = "sha256:b21747f79f360e790525e6f6438c7569ddbfb1b3197b9e65043f25c3c9b489d8"}, + {file = "rpds_py-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:0626238a43152918f9e72ede9a3b6ccc9e299adc8ade0d67c5e142d564c9a83d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6b4ef7725386dc0762857097f6b7266a6cdd62bfd209664da6712cb26acef035"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6bc0e697d4d79ab1aacbf20ee5f0df80359ecf55db33ff41481cf3e24f206919"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da52d62a96e61c1c444f3998c434e8b263c384f6d68aca8274d2e08d1906325c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:98e4fe5db40db87ce1c65031463a760ec7906ab230ad2249b4572c2fc3ef1f9f"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30bdc973f10d28e0337f71d202ff29345320f8bc49a31c90e6c257e1ccef4333"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:faa5e8496c530f9c71f2b4e1c49758b06e5f4055e17144906245c99fa6d45356"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32eb88c30b6a4f0605508023b7141d043a79b14acb3b969aa0b4f99b25bc7d4a"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a89a8ce9e4e75aeb7fa5d8ad0f3fecdee813802592f4f46a15754dcb2fd6b061"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:241e6c125568493f553c3d0fdbb38c74babf54b45cef86439d4cd97ff8feb34d"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:3b766a9f57663396e4f34f5140b3595b233a7b146e94777b97a8413a1da1be18"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:af4a644bf890f56e41e74be7d34e9511e4954894d544ec6b8efe1e21a1a8da6c"}, + {file = "rpds_py-0.21.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3e30a69a706e8ea20444b98a49f386c17b26f860aa9245329bab0851ed100677"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:031819f906bb146561af051c7cef4ba2003d28cff07efacef59da973ff7969ba"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b876f2bc27ab5954e2fd88890c071bd0ed18b9c50f6ec3de3c50a5ece612f7a6"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc5695c321e518d9f03b7ea6abb5ea3af4567766f9852ad1560f501b17588c7b"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b4de1da871b5c0fd5537b26a6fc6814c3cc05cabe0c941db6e9044ffbb12f04a"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:878f6fea96621fda5303a2867887686d7a198d9e0f8a40be100a63f5d60c88c9"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8eeec67590e94189f434c6d11c426892e396ae59e4801d17a93ac96b8c02a6c"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff2eba7f6c0cb523d7e9cff0903f2fe1feff8f0b2ceb6bd71c0e20a4dcee271"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a429b99337062877d7875e4ff1a51fe788424d522bd64a8c0a20ef3021fdb6ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:d167e4dbbdac48bd58893c7e446684ad5d425b407f9336e04ab52e8b9194e2ed"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eb2de8a147ffe0626bfdc275fc6563aa7bf4b6db59cf0d44f0ccd6ca625a24e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e78868e98f34f34a88e23ee9ccaeeec460e4eaf6db16d51d7a9b883e5e785a5e"}, + {file = "rpds_py-0.21.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4991ca61656e3160cdaca4851151fd3f4a92e9eba5c7a530ab030d6aee96ec89"}, + {file = "rpds_py-0.21.0.tar.gz", hash = "sha256:ed6378c9d66d0de903763e7706383d60c33829581f0adff47b6535f1802fa6db"}, ] [[package]] @@ -3222,13 +3483,13 @@ tenacity = "*" [[package]] name = "sentence-transformers" -version = "3.2.1" +version = "3.3.0" description = "State-of-the-Art Text Embeddings" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "sentence_transformers-3.2.1-py3-none-any.whl", hash = "sha256:c507e069eea33d15f1f2c72f74d7ea93abef298152cc235ab5af5e3a7584f738"}, - {file = "sentence_transformers-3.2.1.tar.gz", hash = "sha256:9fc38e620e5e1beba31d538a451778c9ccdbad77119d90f59f5bce49c4148e79"}, + {file = "sentence_transformers-3.3.0-py3-none-any.whl", hash = "sha256:5897c376fde1fea5f22a90ead2612278a464e52b8e42f1af95f84092c36bc23c"}, + {file = "sentence_transformers-3.3.0.tar.gz", hash = "sha256:b91f0aea4ada72ed5a7cdbe8a6245a7152d0d9f84f336383778f8568e406b008"}, ] [package.dependencies] @@ -3241,7 +3502,7 @@ tqdm = "*" transformers = ">=4.41.0,<5.0.0" [package.extras] -dev = ["accelerate (>=0.20.3)", "datasets", "pre-commit", "pytest", "pytest-cov"] +dev = ["accelerate (>=0.20.3)", "datasets", "peft", "pre-commit", "pytest", "pytest-cov"] onnx = ["optimum[onnxruntime] (>=1.23.1)"] onnx-gpu = ["optimum[onnxruntime-gpu] (>=1.23.1)"] openvino = ["optimum-intel[openvino] (>=1.20.0)"] @@ -3354,47 +3615,42 @@ files = [ [[package]] name = "tiktoken" -version = "0.7.0" +version = "0.8.0" description = "tiktoken 
is a fast BPE tokeniser for use with OpenAI's models" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, - {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, - {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, - {file = 
"tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, - {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, - {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, - {file = 
"tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, - {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, - {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, - {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, + {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, + {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, + {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, + {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"}, + {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"}, + {file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"}, + {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"}, + {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"}, + {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"}, + {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"}, + {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"}, ] [package.dependencies] @@ -3406,111 +3662,123 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tokenizers" -version = "0.20.1" +version = "0.20.3" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "tokenizers-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:439261da7c0a5c88bda97acb284d49fbdaf67e9d3b623c0bfd107512d22787a9"}, - {file = "tokenizers-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03dae629d99068b1ea5416d50de0fea13008f04129cc79af77a2a6392792d93c"}, - {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b61f561f329ffe4b28367798b89d60c4abf3f815d37413b6352bc6412a359867"}, - {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec870fce1ee5248a10be69f7a8408a234d6f2109f8ea827b4f7ecdbf08c9fd15"}, - {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d388d1ea8b7447da784e32e3b86a75cce55887e3b22b31c19d0b186b1c677800"}, - {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:299c85c1d21135bc01542237979bf25c32efa0d66595dd0069ae259b97fb2dbe"}, - {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e96f6c14c9752bb82145636b614d5a78e9cde95edfbe0a85dad0dd5ddd6ec95c"}, - {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc9e95ad49c932b80abfbfeaf63b155761e695ad9f8a58c52a47d962d76e310f"}, - {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:f22dee205329a636148c325921c73cf3e412e87d31f4d9c3153b302a0200057b"}, - {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2ffd9a8895575ac636d44500c66dffaef133823b6b25067604fa73bbc5ec09d"}, - {file = "tokenizers-0.20.1-cp310-none-win32.whl", hash = "sha256:2847843c53f445e0f19ea842a4e48b89dd0db4e62ba6e1e47a2749d6ec11f50d"}, - {file = "tokenizers-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:f9aa93eacd865f2798b9e62f7ce4533cfff4f5fbd50c02926a78e81c74e432cd"}, - {file = "tokenizers-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4a717dcb08f2dabbf27ae4b6b20cbbb2ad7ed78ce05a829fae100ff4b3c7ff15"}, - {file = "tokenizers-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f84dad1ff1863c648d80628b1b55353d16303431283e4efbb6ab1af56a75832"}, - {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:929c8f3afa16a5130a81ab5079c589226273ec618949cce79b46d96e59a84f61"}, - {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d10766473954397e2d370f215ebed1cc46dcf6fd3906a2a116aa1d6219bfedc3"}, - {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9300fac73ddc7e4b0330acbdda4efaabf74929a4a61e119a32a181f534a11b47"}, - {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ecaf7b0e39caeb1aa6dd6e0975c405716c82c1312b55ac4f716ef563a906969"}, - {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5170be9ec942f3d1d317817ced8d749b3e1202670865e4fd465e35d8c259de83"}, - {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f1ae08fa9aea5891cbd69df29913e11d3841798e0bfb1ff78b78e4e7ea0a4"}, - {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ee86d4095d3542d73579e953c2e5e07d9321af2ffea6ecc097d16d538a2dea16"}, - {file = 
"tokenizers-0.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:86dcd08da163912e17b27bbaba5efdc71b4fbffb841530fdb74c5707f3c49216"}, - {file = "tokenizers-0.20.1-cp311-none-win32.whl", hash = "sha256:9af2dc4ee97d037bc6b05fa4429ddc87532c706316c5e11ce2f0596dfcfa77af"}, - {file = "tokenizers-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:899152a78b095559c287b4c6d0099469573bb2055347bb8154db106651296f39"}, - {file = "tokenizers-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:407ab666b38e02228fa785e81f7cf79ef929f104bcccf68a64525a54a93ceac9"}, - {file = "tokenizers-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f13a2d16032ebc8bd812eb8099b035ac65887d8f0c207261472803b9633cf3e"}, - {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e98eee4dca22849fbb56a80acaa899eec5b72055d79637dd6aa15d5e4b8628c9"}, - {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47c1bcdd61e61136087459cb9e0b069ff23b5568b008265e5cbc927eae3387ce"}, - {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128c1110e950534426e2274837fc06b118ab5f2fa61c3436e60e0aada0ccfd67"}, - {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2e2d47a819d2954f2c1cd0ad51bb58ffac6f53a872d5d82d65d79bf76b9896d"}, - {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bdd67a0e3503a9a7cf8bc5a4a49cdde5fa5bada09a51e4c7e1c73900297539bd"}, - {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689b93d2e26d04da337ac407acec8b5d081d8d135e3e5066a88edd5bdb5aff89"}, - {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0c6a796ddcd9a19ad13cf146997cd5895a421fe6aec8fd970d69f9117bddb45c"}, - {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:3ea919687aa7001a8ff1ba36ac64f165c4e89035f57998fa6cedcfd877be619d"}, - {file = "tokenizers-0.20.1-cp312-none-win32.whl", hash = "sha256:6d3ac5c1f48358ffe20086bf065e843c0d0a9fce0d7f0f45d5f2f9fba3609ca5"}, - {file = "tokenizers-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:b0874481aea54a178f2bccc45aa2d0c99cd3f79143a0948af6a9a21dcc49173b"}, - {file = "tokenizers-0.20.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:96af92e833bd44760fb17f23f402e07a66339c1dcbe17d79a9b55bb0cc4f038e"}, - {file = "tokenizers-0.20.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:65f34e5b731a262dfa562820818533c38ce32a45864437f3d9c82f26c139ca7f"}, - {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17f98fccb5c12ab1ce1f471731a9cd86df5d4bd2cf2880c5a66b229802d96145"}, - {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8c0fc3542cf9370bf92c932eb71bdeb33d2d4aeeb4126d9fd567b60bd04cb30"}, - {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b39356df4575d37f9b187bb623aab5abb7b62c8cb702867a1768002f814800c"}, - {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfdad27b0e50544f6b838895a373db6114b85112ba5c0cefadffa78d6daae563"}, - {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:094663dd0e85ee2e573126918747bdb40044a848fde388efb5b09d57bc74c680"}, - {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e4cf033a2aa207d7ac790e91adca598b679999710a632c4a494aab0fc3a1b2"}, - {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9310951c92c9fb91660de0c19a923c432f110dbfad1a2d429fbc44fa956bf64f"}, - {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05e41e302c315bd2ed86c02e917bf03a6cf7d2f652c9cee1a0eb0d0f1ca0d32c"}, - {file = 
"tokenizers-0.20.1-cp37-none-win32.whl", hash = "sha256:212231ab7dfcdc879baf4892ca87c726259fa7c887e1688e3f3cead384d8c305"}, - {file = "tokenizers-0.20.1-cp37-none-win_amd64.whl", hash = "sha256:896195eb9dfdc85c8c052e29947169c1fcbe75a254c4b5792cdbd451587bce85"}, - {file = "tokenizers-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:741fb22788482d09d68e73ece1495cfc6d9b29a06c37b3df90564a9cfa688e6d"}, - {file = "tokenizers-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10be14ebd8082086a342d969e17fc2d6edc856c59dbdbddd25f158fa40eaf043"}, - {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:514cf279b22fa1ae0bc08e143458c74ad3b56cd078b319464959685a35c53d5e"}, - {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a647c5b7cb896d6430cf3e01b4e9a2d77f719c84cefcef825d404830c2071da2"}, - {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cdf379219e1e1dd432091058dab325a2e6235ebb23e0aec8d0508567c90cd01"}, - {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ba72260449e16c4c2f6f3252823b059fbf2d31b32617e582003f2b18b415c39"}, - {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:910b96ed87316e4277b23c7bcaf667ce849c7cc379a453fa179e7e09290eeb25"}, - {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53975a6694428a0586534cc1354b2408d4e010a3103117f617cbb550299797c"}, - {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:07c4b7be58da142b0730cc4e5fd66bb7bf6f57f4986ddda73833cd39efef8a01"}, - {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b605c540753e62199bf15cf69c333e934077ef2350262af2ccada46026f83d1c"}, - {file = "tokenizers-0.20.1-cp38-none-win32.whl", hash = 
"sha256:88b3bc76ab4db1ab95ead623d49c95205411e26302cf9f74203e762ac7e85685"}, - {file = "tokenizers-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:d412a74cf5b3f68a90c615611a5aa4478bb303d1c65961d22db45001df68afcb"}, - {file = "tokenizers-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a25dcb2f41a0a6aac31999e6c96a75e9152fa0127af8ece46c2f784f23b8197a"}, - {file = "tokenizers-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a12c3cebb8c92e9c35a23ab10d3852aee522f385c28d0b4fe48c0b7527d59762"}, - {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02e18da58cf115b7c40de973609c35bde95856012ba42a41ee919c77935af251"}, - {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f326a1ac51ae909b9760e34671c26cd0dfe15662f447302a9d5bb2d872bab8ab"}, - {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b4872647ea6f25224e2833b044b0b19084e39400e8ead3cfe751238b0802140"}, - {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce6238a3311bb8e4c15b12600927d35c267b92a52c881ef5717a900ca14793f7"}, - {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57b7a8880b208866508b06ce365dc631e7a2472a3faa24daa430d046fb56c885"}, - {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a908c69c2897a68f412aa05ba38bfa87a02980df70f5a72fa8490479308b1f2d"}, - {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:da1001aa46f4490099c82e2facc4fbc06a6a32bf7de3918ba798010954b775e0"}, - {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:42c097390e2f0ed0a5c5d569e6669dd4e9fff7b31c6a5ce6e9c66a61687197de"}, - {file = "tokenizers-0.20.1-cp39-none-win32.whl", hash = "sha256:3d4d218573a3d8b121a1f8c801029d70444ffb6d8f129d4cca1c7b672ee4a24c"}, - {file = 
"tokenizers-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:37d1e6f616c84fceefa7c6484a01df05caf1e207669121c66213cb5b2911d653"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48689da7a395df41114f516208d6550e3e905e1239cc5ad386686d9358e9cef0"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:712f90ea33f9bd2586b4a90d697c26d56d0a22fd3c91104c5858c4b5b6489a79"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:359eceb6a620c965988fc559cebc0a98db26713758ec4df43fb76d41486a8ed5"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d3caf244ce89d24c87545aafc3448be15870096e796c703a0d68547187192e1"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b03cf8b9a32254b1bf8a305fb95c6daf1baae0c1f93b27f2b08c9759f41dee"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:218e5a3561561ea0f0ef1559c6d95b825308dbec23fb55b70b92589e7ff2e1e8"}, - {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f40df5e0294a95131cc5f0e0eb91fe86d88837abfbee46b9b3610b09860195a7"}, - {file = "tokenizers-0.20.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:08aaa0d72bb65058e8c4b0455f61b840b156c557e2aca57627056624c3a93976"}, - {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:998700177b45f70afeb206ad22c08d9e5f3a80639dae1032bf41e8cbc4dada4b"}, - {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62f7fbd3c2c38b179556d879edae442b45f68312019c3a6013e56c3947a4e648"}, - {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31e87fca4f6bbf5cc67481b562147fe932f73d5602734de7dd18a8f2eee9c6dd"}, - {file = 
"tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:956f21d359ae29dd51ca5726d2c9a44ffafa041c623f5aa33749da87cfa809b9"}, - {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1fbbaf17a393c78d8aedb6a334097c91cb4119a9ced4764ab8cfdc8d254dc9f9"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ebe63e31f9c1a970c53866d814e35ec2ec26fda03097c486f82f3891cee60830"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:81970b80b8ac126910295f8aab2d7ef962009ea39e0d86d304769493f69aaa1e"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130e35e76f9337ed6c31be386e75d4925ea807055acf18ca1a9b0eec03d8fe23"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd28a8614f5c82a54ab2463554e84ad79526c5184cf4573bbac2efbbbcead457"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9041ee665d0fa7f5c4ccf0f81f5e6b7087f797f85b143c094126fc2611fec9d0"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:62eb9daea2a2c06bcd8113a5824af8ef8ee7405d3a71123ba4d52c79bb3d9f1a"}, - {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f861889707b54a9ab1204030b65fd6c22bdd4a95205deec7994dc22a8baa2ea4"}, - {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:89d5c337d74ea6e5e7dc8af124cf177be843bbb9ca6e58c01f75ea103c12c8a9"}, - {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0b7f515c83397e73292accdbbbedc62264e070bae9682f06061e2ddce67cacaf"}, - {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0305fc1ec6b1e5052d30d9c1d5c807081a7bd0cae46a33d03117082e91908c"}, - {file = 
"tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc611e6ac0fa00a41de19c3bf6391a05ea201d2d22b757d63f5491ec0e67faa"}, - {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5ffe0d7f7bfcfa3b2585776ecf11da2e01c317027c8573c78ebcb8985279e23"}, - {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e7edb8ec12c100d5458d15b1e47c0eb30ad606a05641f19af7563bc3d1608c14"}, - {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:de291633fb9303555793cc544d4a86e858da529b7d0b752bcaf721ae1d74b2c9"}, - {file = "tokenizers-0.20.1.tar.gz", hash = "sha256:84edcc7cdeeee45ceedb65d518fffb77aec69311c9c8e30f77ad84da3025f002"}, + {file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"}, + {file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b"}, + {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118"}, + {file = 
"tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1"}, + {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b"}, + {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d"}, + {file = "tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f"}, + {file = "tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c"}, + {file = "tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442"}, + {file = "tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad"}, + {file = 
"tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2"}, + {file = "tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c"}, + {file = "tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01"}, + {file = "tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13"}, + {file = "tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273"}, + {file = "tokenizers-0.20.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:9adda1ff5fb9dcdf899ceca672a4e2ce9e797adb512a6467305ca3d8bfcfbdd0"}, + {file = "tokenizers-0.20.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:6dde2cae6004ba7a3badff4a11911cae03ebf23e97eebfc0e71fef2530e5074f"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4a7fd678b35614fca708579eb95b7587a5e8a6d328171bd2488fd9f27d82be4"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b80e3c7283a01a356bd2210f53d1a4a5d32b269c2024389ed0173137708d50e"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8cc0e8176b762973758a77f0d9c4467d310e33165fb74173418ca3734944da4"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5634b2e2f5f3d2b4439d2d74066e22eb4b1f04f3fea05cb2a3c12d89b5a3bcd"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4ba635165bc1ea46f2da8e5d80b5f70f6ec42161e38d96dbef33bb39df73964"}, + {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e4c7c64172e7789bd8b07aa3087ea87c4c4de7e90937a2aa036b5d92332536"}, + {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1f74909ef7675c26d4095a817ec3393d67f3158ca4836c233212e5613ef640c4"}, + {file = 
"tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9b81321a1e05b16487d312b4264984513f8b4a7556229cafac6e88c2036b09"}, + {file = "tokenizers-0.20.3-cp37-none-win32.whl", hash = "sha256:ab48184cd58b4a03022a2ec75b54c9f600ffea9a733612c02325ed636f353729"}, + {file = "tokenizers-0.20.3-cp37-none-win_amd64.whl", hash = "sha256:60ac483cebee1c12c71878523e768df02fa17e4c54412966cb3ac862c91b36c1"}, + {file = "tokenizers-0.20.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3229ef103c89583d10b9378afa5d601b91e6337530a0988e17ca8d635329a996"}, + {file = "tokenizers-0.20.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6ac52cc24bad3de865c7e65b1c4e7b70d00938a8ae09a92a453b8f676e714ad5"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04627b7b502fa6a2a005e1bd446fa4247d89abcb1afaa1b81eb90e21aba9a60f"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c27ceb887f0e81a3c377eb4605dca7a95a81262761c0fba308d627b2abb98f2b"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65ab780194da4e1fcf5670523a2f377c4838ebf5249efe41fa1eddd2a84fb49d"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d343134f47159e81f7f242264b0eb222e6b802f37173c8d7d7b64d5c9d1388"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2475bb004ab2009d29aff13b5047bfdb3d4b474f0aa9d4faa13a7f34dbbbb43"}, + {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b6583a65c01db1197c1eb36857ceba8ec329d53afadd268b42a6b04f4965724"}, + {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:62d00ba208358c037eeab7bfc00a905adc67b2d31b68ab40ed09d75881e114ea"}, + {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:0fc7a39e5bedc817bda395a798dfe2d9c5f7c71153c90d381b5135a0328d9520"}, + {file = "tokenizers-0.20.3-cp38-none-win32.whl", hash = "sha256:84d40ee0f8550d64d3ea92dd7d24a8557a9172165bdb986c9fb2503b4fe4e3b6"}, + {file = "tokenizers-0.20.3-cp38-none-win_amd64.whl", hash = "sha256:205a45246ed7f1718cf3785cff88450ba603352412aaf220ace026384aa3f1c0"}, + {file = "tokenizers-0.20.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:93e37f0269a11dc3b1a953f1fca9707f0929ebf8b4063c591c71a0664219988e"}, + {file = "tokenizers-0.20.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f4cb0c614b0135e781de96c2af87e73da0389ac1458e2a97562ed26e29490d8d"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0"}, + {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248"}, + {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75"}, + {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921"}, + {file = 
"tokenizers-0.20.3-cp39-none-win32.whl", hash = "sha256:401cc21ef642ee235985d747f65e18f639464d377c70836c9003df208d582064"}, + {file = "tokenizers-0.20.3-cp39-none-win_amd64.whl", hash = "sha256:7498f3ea7746133335a6adb67a77cf77227a8b82c8483f644a2e5f86fea42b8d"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b"}, + {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f3558a7ae6a6d38a77dfce12172a1e2e1bf3e8871e744a1861cd7591ea9ebe24"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d53029fe44bc70c3ff14ef512460a0cf583495a0f8e2f4b70e26eb9438e38a9"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a2a56397b2bec5a629b516b23f0f8a3e4f978c7488d4a299980f8375954b85"}, + {file = 
"tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e5bfaae740ef9ece000f8a07e78ac0e2b085c5ce9648f8593ddf0243c9f76d"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fbaf3ea28fedfb2283da60e710aff25492e795a7397cad8a50f1e079b65a5a70"}, + {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c47c037116310dc976eb96b008e41b9cfaba002ed8005848d4d632ee0b7ba9ae"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c31751f0721f58f5e19bb27c1acc259aeff860d8629c4e1a900b26a1979ada8e"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c697cbd3be7a79ea250ea5f380d6f12e534c543cfb137d5c734966b3ee4f34cc"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b48971b88ef9130bf35b41b35fd857c3c4dae4a9cd7990ebc7fc03e59cc92438"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e615de179bbe060ab33773f0d98a8a8572b5883dd7dac66c1de8c056c7e748c"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da1ec842035ed9999c62e45fbe0ff14b7e8a7e02bb97688cc6313cf65e5cd755"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6ee4954c1dd23aadc27958dad759006e71659d497dcb0ef0c7c87ea992c16ebd"}, + {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3eda46ca402751ec82553a321bf35a617b76bbed7586e768c02ccacbdda94d6d"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:de082392a85eb0055cc055c535bff2f0cc15d7a000bdc36fbf601a0f3cf8507a"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3db46cc0647bfd88263afdb739b92017a02a87ee30945cb3e86c7e25c7c9917"}, + {file = 
"tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627"}, + {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb"}, + {file = "tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539"}, ] [package.dependencies] @@ -3523,13 +3791,13 @@ testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.1.0" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, ] [[package]] @@ -3757,13 +4025,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "75.3.0.20241107" +version = "75.3.0.20241112" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - 
{file = "types-setuptools-75.3.0.20241107.tar.gz", hash = "sha256:f66710e1cd4a936e5fcc12d4e49be1a67c34372cf753e87ebe704426451b4012"}, - {file = "types_setuptools-75.3.0.20241107-py3-none-any.whl", hash = "sha256:bc6de6e2bcb6d610556304d0a69fe4ca208ac4896162647314ecfd9fd73d8550"}, + {file = "types-setuptools-75.3.0.20241112.tar.gz", hash = "sha256:f9e1ebd17a56f606e16395c4ee4efa1cdc394b9a2a0ee898a624058b4b62ef8f"}, + {file = "types_setuptools-75.3.0.20241112-py3-none-any.whl", hash = "sha256:78cb5fef4a6056d2f37114d27da90f4655a306e4e38042d7034a8a880bc3f5dd"}, ] [[package]] @@ -3796,13 +4064,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.26.3" +version = "20.27.1" description = "Virtual Python Environment builder" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, + {file = "virtualenv-20.27.1-py3-none-any.whl", hash = "sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4"}, + {file = "virtualenv-20.27.1.tar.gz", hash = "sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba"}, ] [package.dependencies] @@ -3827,109 +4095,93 @@ files = [ [[package]] name = "yarl" -version = "1.15.2" +version = "1.17.1" description = "Yet another URL library" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e4ee8b8639070ff246ad3649294336b06db37a94bdea0d09ea491603e0be73b8"}, - {file = "yarl-1.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a7cf963a357c5f00cb55b1955df8bbe68d2f2f65de065160a1c26b85a1e44172"}, - {file = "yarl-1.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:43ebdcc120e2ca679dba01a779333a8ea76b50547b55e812b8b92818d604662c"}, - {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3433da95b51a75692dcf6cc8117a31410447c75a9a8187888f02ad45c0a86c50"}, - {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38d0124fa992dbacd0c48b1b755d3ee0a9f924f427f95b0ef376556a24debf01"}, - {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ded1b1803151dd0f20a8945508786d57c2f97a50289b16f2629f85433e546d47"}, - {file = "yarl-1.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace4cad790f3bf872c082366c9edd7f8f8f77afe3992b134cfc810332206884f"}, - {file = "yarl-1.15.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c77494a2f2282d9bbbbcab7c227a4d1b4bb829875c96251f66fb5f3bae4fb053"}, - {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b7f227ca6db5a9fda0a2b935a2ea34a7267589ffc63c8045f0e4edb8d8dcf956"}, - {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:31561a5b4d8dbef1559b3600b045607cf804bae040f64b5f5bca77da38084a8a"}, - {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3e52474256a7db9dcf3c5f4ca0b300fdea6c21cca0148c8891d03a025649d935"}, - {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0e1af74a9529a1137c67c887ed9cde62cff53aa4d84a3adbec329f9ec47a3936"}, - {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:15c87339490100c63472a76d87fe7097a0835c705eb5ae79fd96e343473629ed"}, - {file = "yarl-1.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:74abb8709ea54cc483c4fb57fb17bb66f8e0f04438cff6ded322074dbd17c7ec"}, - {file = "yarl-1.15.2-cp310-cp310-win32.whl", hash = "sha256:ffd591e22b22f9cb48e472529db6a47203c41c2c5911ff0a52e85723196c0d75"}, - {file = "yarl-1.15.2-cp310-cp310-win_amd64.whl", hash 
= "sha256:1695497bb2a02a6de60064c9f077a4ae9c25c73624e0d43e3aa9d16d983073c2"}, - {file = "yarl-1.15.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9fcda20b2de7042cc35cf911702fa3d8311bd40055a14446c1e62403684afdc5"}, - {file = "yarl-1.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0545de8c688fbbf3088f9e8b801157923be4bf8e7b03e97c2ecd4dfa39e48e0e"}, - {file = "yarl-1.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fbda058a9a68bec347962595f50546a8a4a34fd7b0654a7b9697917dc2bf810d"}, - {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ac2bc069f4a458634c26b101c2341b18da85cb96afe0015990507efec2e417"}, - {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd126498171f752dd85737ab1544329a4520c53eed3997f9b08aefbafb1cc53b"}, - {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3db817b4e95eb05c362e3b45dafe7144b18603e1211f4a5b36eb9522ecc62bcf"}, - {file = "yarl-1.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:076b1ed2ac819933895b1a000904f62d615fe4533a5cf3e052ff9a1da560575c"}, - {file = "yarl-1.15.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8cfd847e6b9ecf9f2f2531c8427035f291ec286c0a4944b0a9fce58c6446046"}, - {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:32b66be100ac5739065496c74c4b7f3015cef792c3174982809274d7e51b3e04"}, - {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:34a2d76a1984cac04ff8b1bfc939ec9dc0914821264d4a9c8fd0ed6aa8d4cfd2"}, - {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0afad2cd484908f472c8fe2e8ef499facee54a0a6978be0e0cff67b1254fd747"}, - {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:c68e820879ff39992c7f148113b46efcd6ec765a4865581f2902b3c43a5f4bbb"}, - {file = 
"yarl-1.15.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:98f68df80ec6ca3015186b2677c208c096d646ef37bbf8b49764ab4a38183931"}, - {file = "yarl-1.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c56ec1eacd0a5d35b8a29f468659c47f4fe61b2cab948ca756c39b7617f0aa5"}, - {file = "yarl-1.15.2-cp311-cp311-win32.whl", hash = "sha256:eedc3f247ee7b3808ea07205f3e7d7879bc19ad3e6222195cd5fbf9988853e4d"}, - {file = "yarl-1.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:0ccaa1bc98751fbfcf53dc8dfdb90d96e98838010fc254180dd6707a6e8bb179"}, - {file = "yarl-1.15.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82d5161e8cb8f36ec778fd7ac4d740415d84030f5b9ef8fe4da54784a1f46c94"}, - {file = "yarl-1.15.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fa2bea05ff0a8fb4d8124498e00e02398f06d23cdadd0fe027d84a3f7afde31e"}, - {file = "yarl-1.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99e12d2bf587b44deb74e0d6170fec37adb489964dbca656ec41a7cd8f2ff178"}, - {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:243fbbbf003754fe41b5bdf10ce1e7f80bcc70732b5b54222c124d6b4c2ab31c"}, - {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:856b7f1a7b98a8c31823285786bd566cf06226ac4f38b3ef462f593c608a9bd6"}, - {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:553dad9af802a9ad1a6525e7528152a015b85fb8dbf764ebfc755c695f488367"}, - {file = "yarl-1.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30c3ff305f6e06650a761c4393666f77384f1cc6c5c0251965d6bfa5fbc88f7f"}, - {file = "yarl-1.15.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:353665775be69bbfc6d54c8d134bfc533e332149faeddd631b0bc79df0897f46"}, - {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f4fe99ce44128c71233d0d72152db31ca119711dfc5f2c82385ad611d8d7f897"}, - {file 
= "yarl-1.15.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9c1e3ff4b89cdd2e1a24c214f141e848b9e0451f08d7d4963cb4108d4d798f1f"}, - {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:711bdfae4e699a6d4f371137cbe9e740dc958530cb920eb6f43ff9551e17cfbc"}, - {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4388c72174868884f76affcdd3656544c426407e0043c89b684d22fb265e04a5"}, - {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f0e1844ad47c7bd5d6fa784f1d4accc5f4168b48999303a868fe0f8597bde715"}, - {file = "yarl-1.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a5cafb02cf097a82d74403f7e0b6b9df3ffbfe8edf9415ea816314711764a27b"}, - {file = "yarl-1.15.2-cp312-cp312-win32.whl", hash = "sha256:156ececdf636143f508770bf8a3a0498de64da5abd890c7dbb42ca9e3b6c05b8"}, - {file = "yarl-1.15.2-cp312-cp312-win_amd64.whl", hash = "sha256:435aca062444a7f0c884861d2e3ea79883bd1cd19d0a381928b69ae1b85bc51d"}, - {file = "yarl-1.15.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:416f2e3beaeae81e2f7a45dc711258be5bdc79c940a9a270b266c0bec038fb84"}, - {file = "yarl-1.15.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:173563f3696124372831007e3d4b9821746964a95968628f7075d9231ac6bb33"}, - {file = "yarl-1.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9ce2e0f6123a60bd1a7f5ae3b2c49b240c12c132847f17aa990b841a417598a2"}, - {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaea112aed589131f73d50d570a6864728bd7c0c66ef6c9154ed7b59f24da611"}, - {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ca3b9f370f218cc2a0309542cab8d0acdfd66667e7c37d04d617012485f904"}, - {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23ec1d3c31882b2a8a69c801ef58ebf7bae2553211ebbddf04235be275a38548"}, - {file = "yarl-1.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:75119badf45f7183e10e348edff5a76a94dc19ba9287d94001ff05e81475967b"}, - {file = "yarl-1.15.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78e6fdc976ec966b99e4daa3812fac0274cc28cd2b24b0d92462e2e5ef90d368"}, - {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8657d3f37f781d987037f9cc20bbc8b40425fa14380c87da0cb8dfce7c92d0fb"}, - {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:93bed8a8084544c6efe8856c362af08a23e959340c87a95687fdbe9c9f280c8b"}, - {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:69d5856d526802cbda768d3e6246cd0d77450fa2a4bc2ea0ea14f0d972c2894b"}, - {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ccad2800dfdff34392448c4bf834be124f10a5bc102f254521d931c1c53c455a"}, - {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a880372e2e5dbb9258a4e8ff43f13888039abb9dd6d515f28611c54361bc5644"}, - {file = "yarl-1.15.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c998d0558805860503bc3a595994895ca0f7835e00668dadc673bbf7f5fbfcbe"}, - {file = "yarl-1.15.2-cp313-cp313-win32.whl", hash = "sha256:533a28754e7f7439f217550a497bb026c54072dbe16402b183fdbca2431935a9"}, - {file = "yarl-1.15.2-cp313-cp313-win_amd64.whl", hash = "sha256:5838f2b79dc8f96fdc44077c9e4e2e33d7089b10788464609df788eb97d03aad"}, - {file = "yarl-1.15.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fbbb63bed5fcd70cd3dd23a087cd78e4675fb5a2963b8af53f945cbbca79ae16"}, - {file = "yarl-1.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2e93b88ecc8f74074012e18d679fb2e9c746f2a56f79cd5e2b1afcf2a8a786b"}, - {file = "yarl-1.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af8ff8d7dc07ce873f643de6dfbcd45dc3db2c87462e5c387267197f59e6d776"}, - {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:66f629632220a4e7858b58e4857927dd01a850a4cef2fb4044c8662787165cf7"}, - {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:833547179c31f9bec39b49601d282d6f0ea1633620701288934c5f66d88c3e50"}, - {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2aa738e0282be54eede1e3f36b81f1e46aee7ec7602aa563e81e0e8d7b67963f"}, - {file = "yarl-1.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a13a07532e8e1c4a5a3afff0ca4553da23409fad65def1b71186fb867eeae8d"}, - {file = "yarl-1.15.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c45817e3e6972109d1a2c65091504a537e257bc3c885b4e78a95baa96df6a3f8"}, - {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:670eb11325ed3a6209339974b276811867defe52f4188fe18dc49855774fa9cf"}, - {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:d417a4f6943112fae3924bae2af7112562285848d9bcee737fc4ff7cbd450e6c"}, - {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bc8936d06cd53fddd4892677d65e98af514c8d78c79864f418bbf78a4a2edde4"}, - {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:954dde77c404084c2544e572f342aef384240b3e434e06cecc71597e95fd1ce7"}, - {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5bc0df728e4def5e15a754521e8882ba5a5121bd6b5a3a0ff7efda5d6558ab3d"}, - {file = "yarl-1.15.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:b71862a652f50babab4a43a487f157d26b464b1dedbcc0afda02fd64f3809d04"}, - {file = "yarl-1.15.2-cp38-cp38-win32.whl", hash = "sha256:63eab904f8630aed5a68f2d0aeab565dcfc595dc1bf0b91b71d9ddd43dea3aea"}, - {file = "yarl-1.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:2cf441c4b6e538ba0d2591574f95d3fdd33f1efafa864faa077d9636ecc0c4e9"}, - {file = "yarl-1.15.2-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:a32d58f4b521bb98b2c0aa9da407f8bd57ca81f34362bcb090e4a79e9924fefc"}, - {file = "yarl-1.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:766dcc00b943c089349d4060b935c76281f6be225e39994c2ccec3a2a36ad627"}, - {file = "yarl-1.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bed1b5dbf90bad3bfc19439258c97873eab453c71d8b6869c136346acfe497e7"}, - {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed20a4bdc635f36cb19e630bfc644181dd075839b6fc84cac51c0f381ac472e2"}, - {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d538df442c0d9665664ab6dd5fccd0110fa3b364914f9c85b3ef9b7b2e157980"}, - {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c6cf1d92edf936ceedc7afa61b07e9d78a27b15244aa46bbcd534c7458ee1b"}, - {file = "yarl-1.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce44217ad99ffad8027d2fde0269ae368c86db66ea0571c62a000798d69401fb"}, - {file = "yarl-1.15.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47a6000a7e833ebfe5886b56a31cb2ff12120b1efd4578a6fcc38df16cc77bd"}, - {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e52f77a0cd246086afde8815039f3e16f8d2be51786c0a39b57104c563c5cbb0"}, - {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:f9ca0e6ce7774dc7830dc0cc4bb6b3eec769db667f230e7c770a628c1aa5681b"}, - {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:136f9db0f53c0206db38b8cd0c985c78ded5fd596c9a86ce5c0b92afb91c3a19"}, - {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:173866d9f7409c0fb514cf6e78952e65816600cb888c68b37b41147349fe0057"}, - {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:6e840553c9c494a35e449a987ca2c4f8372668ee954a03a9a9685075228e5036"}, - {file = "yarl-1.15.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:458c0c65802d816a6b955cf3603186de79e8fdb46d4f19abaec4ef0a906f50a7"}, - {file = "yarl-1.15.2-cp39-cp39-win32.whl", hash = "sha256:5b48388ded01f6f2429a8c55012bdbd1c2a0c3735b3e73e221649e524c34a58d"}, - {file = "yarl-1.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:81dadafb3aa124f86dc267a2168f71bbd2bfb163663661ab0038f6e4b8edb810"}, - {file = "yarl-1.15.2-py3-none-any.whl", hash = "sha256:0d3105efab7c5c091609abacad33afff33bdff0035bece164c98bcf5a85ef90a"}, - {file = "yarl-1.15.2.tar.gz", hash = "sha256:a39c36f4218a5bb668b4f06874d676d35a035ee668e6e7e3538835c703634b84"}, + {file = "yarl-1.17.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1794853124e2f663f0ea54efb0340b457f08d40a1cef78edfa086576179c91"}, + {file = "yarl-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fbea1751729afe607d84acfd01efd95e3b31db148a181a441984ce9b3d3469da"}, + {file = "yarl-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ee427208c675f1b6e344a1f89376a9613fc30b52646a04ac0c1f6587c7e46ec"}, + {file = "yarl-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b74ff4767d3ef47ffe0cd1d89379dc4d828d4873e5528976ced3b44fe5b0a21"}, + {file = "yarl-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:62a91aefff3d11bf60e5956d340eb507a983a7ec802b19072bb989ce120cd948"}, + {file = "yarl-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:846dd2e1243407133d3195d2d7e4ceefcaa5f5bf7278f0a9bda00967e6326b04"}, + {file = "yarl-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e844be8d536afa129366d9af76ed7cb8dfefec99f5f1c9e4f8ae542279a6dc3"}, + {file = "yarl-1.17.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc7c92c1baa629cb03ecb0c3d12564f172218fb1739f54bf5f3881844daadc6d"}, + {file = "yarl-1.17.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:ae3476e934b9d714aa8000d2e4c01eb2590eee10b9d8cd03e7983ad65dfbfcba"}, + {file = "yarl-1.17.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c7e177c619342e407415d4f35dec63d2d134d951e24b5166afcdfd1362828e17"}, + {file = "yarl-1.17.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64cc6e97f14cf8a275d79c5002281f3040c12e2e4220623b5759ea7f9868d6a5"}, + {file = "yarl-1.17.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:84c063af19ef5130084db70ada40ce63a84f6c1ef4d3dbc34e5e8c4febb20822"}, + {file = "yarl-1.17.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:482c122b72e3c5ec98f11457aeb436ae4aecca75de19b3d1de7cf88bc40db82f"}, + {file = "yarl-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:380e6c38ef692b8fd5a0f6d1fa8774d81ebc08cfbd624b1bca62a4d4af2f9931"}, + {file = "yarl-1.17.1-cp310-cp310-win32.whl", hash = "sha256:16bca6678a83657dd48df84b51bd56a6c6bd401853aef6d09dc2506a78484c7b"}, + {file = "yarl-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:561c87fea99545ef7d692403c110b2f99dced6dff93056d6e04384ad3bc46243"}, + {file = "yarl-1.17.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cbad927ea8ed814622305d842c93412cb47bd39a496ed0f96bfd42b922b4a217"}, + {file = "yarl-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fca4b4307ebe9c3ec77a084da3a9d1999d164693d16492ca2b64594340999988"}, + {file = "yarl-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff5c6771c7e3511a06555afa317879b7db8d640137ba55d6ab0d0c50425cab75"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b29beab10211a746f9846baa39275e80034e065460d99eb51e45c9a9495bcca"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a52a1ffdd824fb1835272e125385c32fd8b17fbdefeedcb4d543cc23b332d74"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58c8e9620eb82a189c6c40cb6b59b4e35b2ee68b1f2afa6597732a2b467d7e8f"}, + 
{file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d216e5d9b8749563c7f2c6f7a0831057ec844c68b4c11cb10fc62d4fd373c26d"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:881764d610e3269964fc4bb3c19bb6fce55422828e152b885609ec176b41cf11"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8c79e9d7e3d8a32d4824250a9c6401194fb4c2ad9a0cec8f6a96e09a582c2cc0"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:299f11b44d8d3a588234adbe01112126010bd96d9139c3ba7b3badd9829261c3"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:cc7d768260f4ba4ea01741c1b5fe3d3a6c70eb91c87f4c8761bbcce5181beafe"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:de599af166970d6a61accde358ec9ded821234cbbc8c6413acfec06056b8e860"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2b24ec55fad43e476905eceaf14f41f6478780b870eda5d08b4d6de9a60b65b4"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9fb815155aac6bfa8d86184079652c9715c812d506b22cfa369196ef4e99d1b4"}, + {file = "yarl-1.17.1-cp311-cp311-win32.whl", hash = "sha256:7615058aabad54416ddac99ade09a5510cf77039a3b903e94e8922f25ed203d7"}, + {file = "yarl-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:14bc88baa44e1f84164a392827b5defb4fa8e56b93fecac3d15315e7c8e5d8b3"}, + {file = "yarl-1.17.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:327828786da2006085a4d1feb2594de6f6d26f8af48b81eb1ae950c788d97f61"}, + {file = "yarl-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cc353841428d56b683a123a813e6a686e07026d6b1c5757970a877195f880c2d"}, + {file = "yarl-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c73df5b6e8fabe2ddb74876fb82d9dd44cbace0ca12e8861ce9155ad3c886139"}, + {file = 
"yarl-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bdff5e0995522706c53078f531fb586f56de9c4c81c243865dd5c66c132c3b5"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:06157fb3c58f2736a5e47c8fcbe1afc8b5de6fb28b14d25574af9e62150fcaac"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1654ec814b18be1af2c857aa9000de7a601400bd4c9ca24629b18486c2e35463"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f6595c852ca544aaeeb32d357e62c9c780eac69dcd34e40cae7b55bc4fb1147"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:459e81c2fb920b5f5df744262d1498ec2c8081acdcfe18181da44c50f51312f7"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7e48cdb8226644e2fbd0bdb0a0f87906a3db07087f4de77a1b1b1ccfd9e93685"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d9b6b28a57feb51605d6ae5e61a9044a31742db557a3b851a74c13bc61de5172"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e594b22688d5747b06e957f1ef822060cb5cb35b493066e33ceac0cf882188b7"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5f236cb5999ccd23a0ab1bd219cfe0ee3e1c1b65aaf6dd3320e972f7ec3a39da"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a2a64e62c7a0edd07c1c917b0586655f3362d2c2d37d474db1a509efb96fea1c"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d0eea830b591dbc68e030c86a9569826145df485b2b4554874b07fea1275a199"}, + {file = "yarl-1.17.1-cp312-cp312-win32.whl", hash = "sha256:46ddf6e0b975cd680eb83318aa1d321cb2bf8d288d50f1754526230fcf59ba96"}, + {file = "yarl-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:117ed8b3732528a1e41af3aa6d4e08483c2f0f2e3d3d7dca7cf538b3516d93df"}, + {file 
= "yarl-1.17.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d1d42556b063d579cae59e37a38c61f4402b47d70c29f0ef15cee1acaa64488"}, + {file = "yarl-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c0167540094838ee9093ef6cc2c69d0074bbf84a432b4995835e8e5a0d984374"}, + {file = "yarl-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2f0a6423295a0d282d00e8701fe763eeefba8037e984ad5de44aa349002562ac"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5b078134f48552c4d9527db2f7da0b5359abd49393cdf9794017baec7506170"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d401f07261dc5aa36c2e4efc308548f6ae943bfff20fcadb0a07517a26b196d8"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5f1ac7359e17efe0b6e5fec21de34145caef22b260e978336f325d5c84e6938"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f63d176a81555984e91f2c84c2a574a61cab7111cc907e176f0f01538e9ff6e"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e275792097c9f7e80741c36de3b61917aebecc08a67ae62899b074566ff8556"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:81713b70bea5c1386dc2f32a8f0dab4148a2928c7495c808c541ee0aae614d67"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:aa46dce75078fceaf7cecac5817422febb4355fbdda440db55206e3bd288cfb8"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1ce36ded585f45b1e9bb36d0ae94765c6608b43bd2e7f5f88079f7a85c61a4d3"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:2d374d70fdc36f5863b84e54775452f68639bc862918602d028f89310a034ab0"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:2d9f0606baaec5dd54cb99667fcf85183a7477f3766fbddbe3f385e7fc253299"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b0341e6d9a0c0e3cdc65857ef518bb05b410dbd70d749a0d33ac0f39e81a4258"}, + {file = "yarl-1.17.1-cp313-cp313-win32.whl", hash = "sha256:2e7ba4c9377e48fb7b20dedbd473cbcbc13e72e1826917c185157a137dac9df2"}, + {file = "yarl-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:949681f68e0e3c25377462be4b658500e85ca24323d9619fdc41f68d46a1ffda"}, + {file = "yarl-1.17.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8994b29c462de9a8fce2d591028b986dbbe1b32f3ad600b2d3e1c482c93abad6"}, + {file = "yarl-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f9cbfbc5faca235fbdf531b93aa0f9f005ec7d267d9d738761a4d42b744ea159"}, + {file = "yarl-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b40d1bf6e6f74f7c0a567a9e5e778bbd4699d1d3d2c0fe46f4b717eef9e96b95"}, + {file = "yarl-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5efe0661b9fcd6246f27957f6ae1c0eb29bc60552820f01e970b4996e016004"}, + {file = "yarl-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b5c4804e4039f487e942c13381e6c27b4b4e66066d94ef1fae3f6ba8b953f383"}, + {file = "yarl-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5d6a6c9602fd4598fa07e0389e19fe199ae96449008d8304bf5d47cb745462e"}, + {file = "yarl-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4c9156c4d1eb490fe374fb294deeb7bc7eaccda50e23775b2354b6a6739934"}, + {file = "yarl-1.17.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6324274b4e0e2fa1b3eccb25997b1c9ed134ff61d296448ab8269f5ac068c4c"}, + {file = "yarl-1.17.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d8a8b74d843c2638f3864a17d97a4acda58e40d3e44b6303b8cc3d3c44ae2d29"}, + {file = "yarl-1.17.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = 
"sha256:7fac95714b09da9278a0b52e492466f773cfe37651cf467a83a1b659be24bf71"}, + {file = "yarl-1.17.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c180ac742a083e109c1a18151f4dd8675f32679985a1c750d2ff806796165b55"}, + {file = "yarl-1.17.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:578d00c9b7fccfa1745a44f4eddfdc99d723d157dad26764538fbdda37209857"}, + {file = "yarl-1.17.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:1a3b91c44efa29e6c8ef8a9a2b583347998e2ba52c5d8280dbd5919c02dfc3b5"}, + {file = "yarl-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a7ac5b4984c468ce4f4a553df281450df0a34aefae02e58d77a0847be8d1e11f"}, + {file = "yarl-1.17.1-cp39-cp39-win32.whl", hash = "sha256:7294e38f9aa2e9f05f765b28ffdc5d81378508ce6dadbe93f6d464a8c9594473"}, + {file = "yarl-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:eb6dce402734575e1a8cc0bb1509afca508a400a57ce13d306ea2c663bad1138"}, + {file = "yarl-1.17.1-py3-none-any.whl", hash = "sha256:f1790a4b1e8e8e028c391175433b9c8122c39b46e1663228158e61e6f915bf06"}, + {file = "yarl-1.17.1.tar.gz", hash = "sha256:067a63fcfda82da6b198fa73079b1ca40b7c9b7994995b6ee38acda728b64d47"}, ] [package.dependencies] @@ -3939,20 +4191,24 @@ propcache = ">=0.2.0" [[package]] name = "zipp" -version = "3.19.2" +version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "zipp-3.19.2-py3-none-any.whl", hash = "sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c"}, - {file = "zipp-3.19.2.tar.gz", hash = "sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19"}, + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff 
(>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9, <3.12" -content-hash = "c8f5820a6efc0fcab43ea7d21c627d38c32d01d0dac79403c2bb3909005a5f00" +content-hash = "cddd902892282469f57f758c70c5beb6f5a67072ce3ed9f4173717554cb582ae" diff --git a/pyproject.toml b/pyproject.toml index e35c90f9..00617d26 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,9 @@ pypdf2 = "^3.0.1" tqdm = "^4.67.0" semanticscholar = "^0.8.4" requests = "^2.32.3" +paramiko = "^3.5.0" +psycopg2-binary = "^2.9.10" +pymysql = "^1.1.1" [tool.poetry.group.dev.dependencies]