Bots that need to be added in April

idochesscat
Okay, so I came up with bots for April. We could have Baby Bunny, who sometimes makes a blunder and is rated about 500 Elo. Then we have Bad Bunny (not to be confused with the rapper), who is rated 1000. Then Big Bunny, who is 1500. Next is Fluffy Bunny at 1900, and last but not least we have Easter Bunny at 2300.
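A minimal sketch of the "sometimes makes a blunder" idea, assuming the python-chess library; the function name and blunder rate are purely illustrative, not how Chess.com actually builds its bots:

import random
import chess

def baby_bunny_move(board, blunder_rate=0.3):
    """Play like a weak bot: with probability blunder_rate pick any legal move (the blunder),
    otherwise prefer captures and checks over quiet moves."""
    moves = list(board.legal_moves)
    if random.random() < blunder_rate:
        return random.choice(moves)  # the occasional blunder
    preferred = [m for m in moves if board.is_capture(m) or board.gives_check(m)]
    return random.choice(preferred or moves)

board = chess.Board()
board.push(baby_bunny_move(board))
print(board)

Lowering the blunder rate (and actually looking ahead instead of picking greedily) is roughly what would separate the stronger bunnies from the 500-rated one.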
DaChessBlues
It reminds me of Mittens
DaChessBlues
And cat bots lol
idilis

How about these bunnies instead?

@tkbunny

@bunicula

@president_max

@miskit_mistake

mishailu
idochesscat wrote:
Okay, so I came up with bots for April. We could have Baby Bunny, who sometimes makes a blunder and is rated about 500 Elo. Then we have Bad Bunny (not to be confused with the rapper), who is rated 1000. Then Big Bunny, who is 1500. Next is Fluffy Bunny at 1900, and last but not least we have Easter Bunny at 2300.

Fits the Easter occasion really well.

Dreadgod0069

import math
import random

import chess
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.optimizers import SGD

# Define piece values
PAWN_VALUE = 1
KNIGHT_VALUE = 5
BISHOP_VALUE = 5
ROOK_VALUE = 5
QUEEN_VALUE = 10
KING_VALUE = 50

# Define piece values mapping
PIECE_VALUES = {
    chess.PAWN: PAWN_VALUE,
    chess.KNIGHT: KNIGHT_VALUE,
    chess.BISHOP: BISHOP_VALUE,
    chess.ROOK: ROOK_VALUE,
    chess.QUEEN: QUEEN_VALUE,
    chess.KING: KING_VALUE,
}

def board_to_array(board):
    """Convert a chess.Board into an 8x8x12 one-hot array (one plane per piece type and colour)."""
    planes = np.zeros((8, 8, 12), dtype=np.float32)
    for square, piece in board.piece_map().items():
        plane = (piece.piece_type - 1) + (0 if piece.color == chess.WHITE else 6)
        planes[chess.square_rank(square), chess.square_file(square), plane] = 1.0
    return planes

def get_legal_moves(board):
    """Get a list of legal moves for the current board state."""
    return list(board.legal_moves)

def make_move(board, move):
    """Make a move on the board."""
    board.push(move)

def get_board_value(board):
    """Calculate the board value based on the modified piece values."""
    value = 0
    for piece in board.piece_map().values():
        value += PIECE_VALUES[piece.piece_type] * (1 if piece.color == chess.WHITE else -1)
    return value

def create_model():
    """Build a small convolutional network that maps a board array to a single evaluation."""
    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(8, 8, 12)),
        tf.keras.layers.Conv2D(32, kernel_size=3, padding='same', activation='relu'),
        tf.keras.layers.Conv2D(64, kernel_size=3, padding='same', activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(1)
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='mse')
    return model

# Define the learning rate schedule
def lr_schedule(epoch):
    initial_lr = 0.1
    decay_factor = 0.1
    decay_epochs = 30
    lr = initial_lr * (decay_factor ** math.floor((1 + epoch) / decay_epochs))
    return lr

# Create the learning rate scheduler
lr_scheduler = LearningRateScheduler(lr_schedule)

# Define the optimizer with initial learning rate
optimizer = SGD(learning_rate=0.1)

# Build the model and recompile it with the SGD optimizer
model = create_model()
model.compile(optimizer=optimizer, loss='mse')

# Train the model with learning rate decay
# (x_train and y_train are assumed to be board arrays and target evaluations prepared elsewhere)
model.fit(x_train, y_train, epochs=100, batch_size=32, callbacks=[lr_scheduler])

class MCTSNode:
    def __init__(self, board, parent=None, move=None):
        self.board = board
        self.parent = parent
        self.move = move
        self.children = []
        self.visits = 0
        self.q = 0
        self.u = 0

    def add_child(self, board, move):
        self.children.append(MCTSNode(board, self, move))

    def update(self, result):
        self.visits += 1
        self.q += result
        self.u = self.q / self.visits

class MCTS:
    def __init__(self, iterations):
        self.iterations = iterations

    def search(self, root):
        for i in range(self.iterations):
            # Selection: walk down from the root to a leaf
            node = self.select(root)
            # Expansion
            board = node.board
            if board.is_checkmate() or board.is_stalemate():
                continue
            moves = get_legal_moves(board)
            for move in moves:
                next_board = board.copy()
                make_move(next_board, move)
                node.add_child(next_board, move)
            # Simulation
            result = self.simulate(node.children[-1].board)
            # Backpropagation
            while node is not None:
                node.update(result)
                node = node.parent
        return self.get_best_child(root)

    def select(self, node):
        while len(node.children) > 0:
            node = self.get_best_child(node)
        return node

    def get_best_child(self, node):
        return self.pick_max_uct(node)

    def pick_max_uct(self, node):
        # Note: this picks the child with the highest average result (no exploration term)
        best = node.children[0]
        for c in node.children:
            if c.u > best.u:
                best = c
        return best

    def simulate(self, board):
        """Simulate a game with random moves from the given board state and return the material score."""
        while not board.is_game_over():
            moves = get_legal_moves(board)
            move = random.choice(moves)
            make_move(board, move)
        result = get_board_value(board)
        return result

def play_game(model):
    board = chess.Board()
    while not board.is_game_over():
        if board.turn == chess.WHITE:
            # Player move (re-prompt if the entered move is not legal)
            move = input("Enter your move: ")
            move = chess.Move.from_uci(move)
            if move not in board.legal_moves:
                print("Illegal move, try again.")
                continue
            board.push(move)
        else:
            # Model move: run MCTS from the current position (this iteration count is enormous and very slow)
            root = MCTSNode(board)
            best_node = MCTS(1500000).search(root)
            board.push(best_node.move)
        print(board)

# The minimax-style functions below assume a search-node object that provides
# is_terminal(), evaluate(), children(), hash(), in_check(), make_null_move(),
# is_capture() and is_check(); no such class is defined in this post.
def minimax(node, depth, maximizing_player):
    if depth == 0 or node.is_terminal():
        return node.evaluate()

    if maximizing_player:
        value = float('-inf')
        for child in node.children():
            value = max(value, minimax(child, depth - 1, False))
        return value
    else:
        value = float('inf')
        for child in node.children():
            value = min(value, minimax(child, depth - 1, True))
        return value

def alphabeta(node, depth, alpha, beta, maximizing_player):
    if depth == 0 or node.is_terminal():
        return node.evaluate()

    if maximizing_player:
        value = float('-inf')
        for child in node.children():
            value = max(value, alphabeta(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:
                break
        return value
    else:
        value = float('inf')
        for child in node.children():
            value = min(value, alphabeta(child, depth - 1, alpha, beta, True))
            beta = min(beta, value)
            if alpha >= beta:
                break
        return value

class TranspositionTable:
    def __init__(self):
        self.table = {}

    def store(self, key, value):
        self.table[key] = value

    def lookup(self, key):
        return self.table.get(key)
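
The two search functions below store and read TranspositionEntry objects with depth, score, and type fields, but that class is never defined in the post. A minimal sketch of what it could look like (an assumption filled in here, not part of the original code):

class TranspositionEntry:
    """One cached search result: the depth it was searched to, its score,
    and whether that score is "exact", a "lower_bound", or an "upper_bound"."""
    def __init__(self, depth, score, type):
        self.depth = depth
        self.score = score
        self.type = type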

def minimax_with_null_move(node, depth, alpha, beta, maximizing_player, transposition_table):
    # Check if the current position is in the transposition table
    key = node.hash()
    transposition_entry = transposition_table.lookup(key)
    if transposition_entry is not None and transposition_entry.depth >= depth:
        if transposition_entry.type == "exact":
            return transposition_entry.score
        elif transposition_entry.type == "lower_bound":
            alpha = max(alpha, transposition_entry.score)
        elif transposition_entry.type == "upper_bound":
            beta = min(beta, transposition_entry.score)
        if alpha >= beta:
            return transposition_entry.score

    # Check if null move pruning is possible
    if depth >= 3 and not node.is_terminal() and not node.in_check():
        null_move_node = node.make_null_move()
        null_score = -minimax_with_null_move(null_move_node, depth - 3, -beta, -beta + 1, False, transposition_table)
        if null_score >= beta:
            transposition_table.store(key, TranspositionEntry(depth, null_score, "lower_bound"))
            return null_score

    # Normal minimax alpha-beta pruning
    if depth == 0 or node.is_terminal():
        score = node.evaluate()
        transposition_table.store(key, TranspositionEntry(depth, score, "exact"))
        return score

    if maximizing_player:
        value = float('-inf')
        for child in node.children():
            value = max(value, minimax_with_null_move(child, depth - 1, alpha, beta, False, transposition_table))
            alpha = max(alpha, value)
            if alpha >= beta:
                break
        transposition_table.store(key, TranspositionEntry(depth, value, "exact"))
        return value
    else:
        value = float('inf')
        for child in node.children():
            value = min(value, minimax_with_null_move(child, depth - 1, alpha, beta, True, transposition_table))
            beta = min(beta, value)
            if alpha >= beta:
                break
        transposition_table.store(key, TranspositionEntry(depth, value, "exact"))
        return value

def minimax_with_lmr(node, depth, alpha, beta, maximizing_player, transposition_table):
    # Check if the current position is in the transposition table
    key = node.hash()
    transposition_entry = transposition_table.lookup(key)
    if transposition_entry is not None and transposition_entry.depth >= depth:
        if transposition_entry.type == "exact":
            return transposition_entry.score
        elif transposition_entry.type == "lower_bound":
            alpha = max(alpha, transposition_entry.score)
        elif transposition_entry.type == "upper_bound":
            beta = min(beta, transposition_entry.score)
        if alpha >= beta:
            return transposition_entry.score

    # Late move reduction: search quiet moves to a reduced depth
    if depth >= 3 and not node.is_terminal() and not node.in_check() and not node.is_capture():
        reduction = 1
        if depth >= 6:
            reduction = 2
        for child in node.children():
            if child.is_capture() or child.is_check():
                continue
            value = -minimax_with_lmr(child, depth - reduction, -beta, -alpha, not maximizing_player, transposition_table)
            if value >= beta:
                transposition_table.store(key, TranspositionEntry(depth, value, "lower_bound"))
                return value
            if value > alpha:
                alpha = value
            if alpha >= beta:
                transposition_table.store(key, TranspositionEntry(depth, alpha, "lower_bound"))
                return alpha

    # Normal minimax alpha-beta pruning
    if depth == 0 or node.is_terminal():
        score = node.evaluate()
        transposition_table.store(key, TranspositionEntry(depth, score, "exact"))
        return score

    if maximizing_player:
        value = float('-inf')
        for child in node.children():
            value = max(value, -minimax_with_lmr(child, depth - 1, -beta, -alpha, False, transposition_table))
            alpha = max(alpha, value)
            if alpha >= beta:
                break
        transposition_table.store(key, TranspositionEntry(depth, value, "exact"))
        return value
    else:
        value = float('inf')
        for child in node.children():
            value = min(value, -minimax_with_lmr(child, depth - 1, -beta, -alpha, True, transposition_table))
            beta = min(beta, value)
            if alpha >= beta:
                break
        transposition_table.store(key, TranspositionEntry(depth, value, "exact"))
        return value

def get_board_value(board):
    """More detailed evaluation (replaces the simpler get_board_value above): material plus king safety."""
    # Evaluate material
    material_score = 0
    for piece_type in (chess.PAWN, chess.KNIGHT, chess.BISHOP, chess.ROOK, chess.QUEEN):
        material_score += (len(board.pieces(piece_type, chess.WHITE)) - len(board.pieces(piece_type, chess.BLACK))) * PIECE_VALUES[piece_type]

    # Evaluate king safety
    king_square = board.king(chess.WHITE)
    king_safety_score = 0
    if king_square is not None:
        king_file = chess.square_file(king_square)
        king_rank = chess.square_rank(king_square)
        if king_file in (0, 7):
            king_safety_score -= 10  # Penalty for king on edge file
        if king_rank in (0, 7):
            king_safety_score -= 10  # Penalty for king on edge rank
        king_attackers = board.attackers(chess.BLACK, king_square)
        king_safety_score -= len(king_attackers) * 20  # Penalty for each attacker

    # Return total score
    return material_score + king_safety_score

# Define the king safety score function (evaluated from White's point of view)
def king_safety_score(board):
    # Get the king square and its adjacent squares
    king_square = board.king(chess.WHITE)
    adjacent_squares = chess.SquareSet(sq for sq in chess.SQUARES if chess.square_distance(sq, king_square) <= 2)

    # Calculate the number of attackers and defenders of the king square
    attackers = len(board.attackers(chess.BLACK, king_square))
    defenders = len(board.attackers(chess.WHITE, king_square))

    # Calculate the pawn shield score: friendly pawns on the squares directly in front of the king
    pawn_shield = 0
    for offset in (7, 8, 9):
        sq = king_square + offset
        if 0 <= sq < 64 and chess.square_distance(sq, king_square) == 1:
            piece = board.piece_at(sq)
            if piece is not None and piece.piece_type == chess.PAWN and piece.color == chess.WHITE:
                pawn_shield += 1

    # Calculate the piece placement score: friendly pieces near the king
    piece_score = sum(1 for square in adjacent_squares if board.piece_at(square) and board.color_at(square) == chess.WHITE)

    # Calculate the king safety score (pawn shield and defenders help, attackers hurt)
    king_safety = (pawn_shield + defenders - attackers) * 10 + piece_score

    # Return the king safety score
    return king_safety

# Evaluate a position with the king safety score
board = chess.Board("r1bqkbnr/pppp1ppp/2n5/4p3/2B1P3/5N2/PPPP1PPP/RNBQK2R w KQkq e6 0 3")
score = king_safety_score(board)
print(f"King safety score: {score}")

# Define the neural network architecture
inputs = tf.keras.layers.Input(shape=(8,8,12))
x = tf.keras.layers.Conv2D(32, (3, 3), activation='relu')(inputs)
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu')(x)
x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Flatten()(x)
for i in range(35):
    x = tf.keras.layers.Dense(256, activation='relu')(x)
q_values = tf.keras.layers.Dense(4096, activation='linear')(x)
model = tf.keras.Model(inputs=inputs, outputs=q_values)

# Define the loss function and optimizer, and compile the Q-network so train_on_batch can be used later
loss_fn = tf.keras.losses.MeanSquaredError()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss=loss_fn)

# Define the replay buffer and other hyperparameters
replay_buffer = []
batch_size = 32
gamma = 0.99
epsilon = 1.0
epsilon_min = 0.1
epsilon_decay = 0.999
update_frequency = 4
target_update_frequency = 1000
num_episodes = 10000

# Define the reward and punishment values
checkmate_reward = 100.0
checkmate_punishment = -100.0

# Train the neural network using deep Q-learning
for episode in range(num_episodes):
    # initial_state() and take_action() are assumed helpers (not defined in this post) that
    # encode the starting position and apply a move index to the environment
    state = initial_state()
    done = False
    while not done:
        # Choose an action using epsilon-greedy exploration
        if np.random.rand() < epsilon:
            action = np.random.randint(0, 4096)
        else:
            q_values = model.predict(np.expand_dims(state, axis=0))[0]
            action = np.argmax(q_values)
        # Take the chosen action and observe the next state and reward
        next_state, reward, done = take_action(action)
        # Check for a checkmate and adjust the reward accordingly
        if done and reward == 1.0:
            reward = checkmate_reward
        elif done and reward == -1.0:
            reward = checkmate_punishment
        # Store the transition in the replay buffer
        replay_buffer.append((state, action, reward, next_state, done))
        # Update the epsilon value
        epsilon = max(epsilon_min, epsilon * epsilon_decay)
        # Update the neural network weights using deep Q-learning
        if len(replay_buffer) >= batch_size and episode % update_frequency == 0:
            minibatch = random.sample(replay_buffer, batch_size)
            states, actions, rewards, next_states, dones = zip(*minibatch)
            states = np.array(states)
            actions = np.array(actions)
            rewards = np.array(rewards)
            next_states = np.array(next_states)
            dones = np.array(dones)
            q_values = model.predict(states)
            next_q_values = model.predict(next_states)
            max_next_q_values = np.max(next_q_values, axis=1)
            targets = rewards + gamma * max_next_q_values * (1 - dones)
            for i in range(batch_size):
                q_values[i][actions[i]] = targets[i]
            loss = model.train_on_batch(states, q_values)
        # Update the target network weights (target_model is assumed to be a second copy of the network created elsewhere)
        if episode % target_update_frequency == 0:
            target_model.set_weights(model.get_weights())
        # Update the current state
        state = next_state

I already submitted this to chess.com

KoperChor

Impressive 

mishailu

The bots have already been loaded.

SoupSailor72
Chess.com had better ideas