class_name UnlockSimulatorCached
extends Control

# CACHED VERSION - Uses aggressive chunk-based caching for 10-100x speedup

# Load the actual game resources
var unlock_collection: UnlockDataCollection = load("res://resources/UnlockData.tres")
var inventory_resource: InventoryResource = load("res://resources/InventoryData.tres")

# Results tracking
var all_results: Array[Dictionary] = []
var results_mutex: Mutex = Mutex.new()

# Chunk-based caching for intermediate simulation states
# Key: "unlock_id:rank,..." sorted string
# Value: {cache_key, ticks, currency, wood, stock, current_ranks, modifiers}
var simulation_cache: Dictionary = {}
var cache_mutex: Mutex = Mutex.new()

# Global unlock struct cache (build once, clone per simulation)
var global_unlock_structs: Array[Dictionary] = []
var global_unlock_structs_mutex: Mutex = Mutex.new()
var unlock_structs_initialized: bool = false

# Manual thread pool
var num_threads: int = 14 # Increase this for more CPU usage
var threads: Array[Thread] = []
var task_queue: Array[Dictionary] = []
var queue_mutex: Mutex = Mutex.new()
var completed_count: int = 0
var completed_mutex: Mutex = Mutex.new()
var active_threads: int = 0
var threads_done: bool = false

# Pre-calculated cost arrays for faster lookup - OPTIMIZATION
var cost_cache: Dictionary = {}

var start_time: int = 0
var total_combinations: int = 0
var last_progress_time: int = 0
var monitoring_active: bool = false
var cache_hits: int = 0
var cache_misses: int = 0

# UI References
@onready var status_label = $MarginContainer/VBoxContainer/StatusPanel/VBox/StatusLabel
@onready var progress_label = $MarginContainer/VBoxContainer/StatusPanel/VBox/ProgressLabel
@onready var progress_bar = $MarginContainer/VBoxContainer/StatusPanel/VBox/ProgressBar
@onready var rate_label = $MarginContainer/VBoxContainer/StatusPanel/VBox/RateLabel
@onready var eta_label = $MarginContainer/VBoxContainer/StatusPanel/VBox/ETALabel
@onready var cache_hits_label = $MarginContainer/VBoxContainer/CachePanel/VBox/CacheHitsLabel
@onready var cache_misses_label = $MarginContainer/VBoxContainer/CachePanel/VBox/CacheMissesLabel
@onready var cache_rate_label = $MarginContainer/VBoxContainer/CachePanel/VBox/CacheRateLabel
@onready var cache_size_label = $MarginContainer/VBoxContainer/CachePanel/VBox/CacheSizeLabel
@onready var results_label = $MarginContainer/VBoxContainer/ResultsPanel/VBox/ScrollContainer/ResultsLabel

func _ready():
	GameManager.tick.stop()
	print("=== CACHED Unlock Simulator Started ===")
	print("Using aggressive chunk-based caching for 10-100x speedup")
	var cpu_count = OS.get_processor_count()
	print("CPU cores detected: %d" % cpu_count)
	print("Creating %d worker threads (adjust num_threads variable for more/less)" % num_threads)

	# Update UI
	status_label.text = "Status: Starting cached simulation..."
	results_label.text = "[b]CACHED Unlock Simulator Started[/b]\n\nCPU cores: %d\nWorker threads: %d\n\nGenerating combinations..." % [cpu_count, num_threads]

	run_comprehensive_test()

func _process(_delta):
	if monitoring_active:
		# Only update progress once per second
		var current_time = Time.get_ticks_msec()
		if current_time - last_progress_time >= 1000:
			last_progress_time = current_time
			update_progress()

func update_progress():
	"""Update progress display"""
	var current_count = 0
	completed_mutex.lock()
	current_count = completed_count
	completed_mutex.unlock()

	# Check if all work is complete
	if current_count >= total_combinations:
		monitoring_active = false
		finish_processing()
		return

	var percent = float(current_count) / total_combinations * 100.0
	var elapsed = (Time.get_ticks_msec() - start_time) / 1000.0
	var rate = current_count / elapsed if elapsed > 0 else 0
	var eta_seconds = (total_combinations - current_count) / rate if rate > 0 else 0

	# Calculate cache hit rate
	var total_cache_checks = cache_hits + cache_misses
	var cache_hit_rate = (float(cache_hits) / total_cache_checks * 100.0) if total_cache_checks > 0 else 0.0

	# Format ETA
	var eta_str = ""
	if eta_seconds > 0:
		var eta_minutes = int(eta_seconds) / 60
		var eta_secs = int(eta_seconds) % 60
		if eta_minutes > 0:
			eta_str = "%dm %ds" % [eta_minutes, eta_secs]
		else:
			eta_str = "%ds" % eta_secs
	else:
		eta_str = "calculating..."

	print("Progress: %.1f%% (%d/%d) - %.1f combos/sec - Cache: %.1f%% hits - ETA: %s" % [
		percent, current_count, total_combinations, rate, cache_hit_rate, eta_str
	])

	# Update UI
	status_label.text = "Status: Running simulation..."
	progress_label.text = "Progress: %.1f%% (%d/%d)" % [percent, current_count, total_combinations]
	progress_bar.value = percent / 100.0
	rate_label.text = "Speed: %.1f combos/sec" % rate
	eta_label.text = "ETA: %s" % eta_str

	# Update cache stats
	cache_hits_label.text = "Cache Hits: %d" % cache_hits
	cache_misses_label.text = "Cache Misses: %d" % cache_misses
	cache_rate_label.text = "Hit Rate: %.1f%%" % cache_hit_rate
	cache_mutex.lock()
	cache_size_label.text = "Cache Entries: %d" % simulation_cache.size()
	cache_mutex.unlock()

func worker_thread(thread_id: int):
	"""Worker thread function that pulls tasks from the queue"""
	# Local batch storage to reduce mutex contention - OPTIMIZATION
	var local_results: Array[Dictionary] = []
	var local_count: int = 0
	var batch_size: int = 10 # Process 10 before syncing

	while true:
		# Get next task from queue
		var task_data = null
		queue_mutex.lock()
		if task_queue.size() > 0:
			task_data = task_queue.pop_front()
		queue_mutex.unlock()

		# If no more tasks, flush results and exit
		if task_data == null:
			if local_results.size() > 0:
				results_mutex.lock()
				all_results.append_array(local_results)
				results_mutex.unlock()

				# CRITICAL FIX: Update completed count when flushing final batch
				completed_mutex.lock()
				completed_count += local_results.size()
				completed_mutex.unlock()
			break

		# Process the task
		var result = simulate_rank_combination_pure(task_data.combo, task_data.unlock_data, 1000000)

		# Store in local batch
		local_results.append(result)
		local_count += 1

		# Flush batch periodically to reduce mutex contention
		if local_results.size() >= batch_size:
			results_mutex.lock()
			all_results.append_array(local_results)
			results_mutex.unlock()

			# CRITICAL FIX: Update progress counter with mutex protection
			completed_mutex.lock()
			completed_count += local_results.size()
			completed_mutex.unlock()

			local_results.clear()
			local_count = 0

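# Note on the batching above: with batch_size = 10 each worker only touches
# results_mutex and completed_mutex about once per ten simulations rather than
# once per task, which keeps lock contention low. The trade-off is that the
# progress counter shown in the UI can lag the true count by up to
# batch_size results per thread until the next flush.
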
func get_cache_key(current_ranks: Dictionary) -> String:
	"""Generate a cache key from current unlock ranks"""
	var sorted_keys = current_ranks.keys()
	sorted_keys.sort()
	var key_parts = []
	for k in sorted_keys:
		key_parts.append(str(k) + ":" + str(current_ranks[k]))
	return ",".join(key_parts)

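# Worked example (hypothetical IDs and ranks, for illustration only):
# get_cache_key({7: 2, 3: 1}) sorts the IDs and returns "3:1,7:2", so any
# simulation that passes through the same rank state produces the same key,
# regardless of the order in which the ranks were bought.
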
func try_load_best_prefix_from_cache(rank_targets: Dictionary) -> Variant:
	"""Balanced cache lookup - fast with good coverage (~10-15 lookups)"""

	cache_mutex.lock()

	# Try exact match first
	var full_key = get_cache_key(rank_targets)
	if simulation_cache.has(full_key):
		cache_hits += 1
		var result = simulation_cache[full_key]
		cache_mutex.unlock()
		return result

	# Sort unlock IDs for consistent ordering
	var unlock_ids = rank_targets.keys()
	unlock_ids.sort()
	var num_unlocks = unlock_ids.size()

	var best_match = null
	var best_rank_sum = 0

	# STRATEGY: Try progressively shorter prefixes by dropping unlocks from the END
	# This is the most common pattern: {1,2,3,4,5} → {1,2,3,4} → {1,2,3} → {1,2} → {1}
	# Covers 80%+ of cache reuse because combinations are generated in sorted order
	for prefix_len in range(num_unlocks - 1, 0, -1):
		var subset = {}
		for i in range(prefix_len):
			subset[unlock_ids[i]] = rank_targets[unlock_ids[i]]

		var key = get_cache_key(subset)
		if simulation_cache.has(key):
			var cached_entry = simulation_cache[key]
			var rank_sum = 0
			for r in cached_entry.current_ranks.values():
				rank_sum += r

			# Keep best match (longest prefix)
			if rank_sum > best_rank_sum:
				best_match = cached_entry
				best_rank_sum = rank_sum
				# Early exit if we found a substantial match
				if prefix_len >= num_unlocks - 2:
					break

	if best_match != null:
		cache_hits += 1
	else:
		cache_misses += 1
	cache_mutex.unlock()

	return best_match

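# Sketch of the prefix fallback above (hypothetical target, for illustration):
# for rank_targets {1: 2, 4: 1, 9: 3} the lookup tries the exact key
# "1:2,4:1,9:3" first, then the shorter prefixes "1:2,4:1" and "1:2",
# keeping the cached entry with the largest total rank. A hit lets the
# simulation resume from that saved tick/currency/modifier state instead of
# replaying the run from tick 0.
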
func should_cache_state(current_ranks: Dictionary, targets_remaining: int) -> bool:
	"""Decide if this state is worth caching"""
	# Don't cache if all targets reached
	if targets_remaining == 0:
		return false

	# Cache aggressively for early states
	var total_ranks = 0
	var active_unlocks = 0

	for rank in current_ranks.values():
		if rank > 0:
			total_ranks += rank
			active_unlocks += 1

	# Cache if: multiple unlocks active OR significant progress on one
	return (active_unlocks >= 2) or (total_ranks >= 2)

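# Worked examples (IDs are hypothetical; only the rank counts matter):
# should_cache_state({3: 1}, 2) is false (one unlock at rank 1), while
# should_cache_state({3: 2}, 1) and should_cache_state({3: 1, 5: 1}, 4) are
# both true - so a state is cached once at least two ranks have been bought.
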
func simulate_rank_combination_pure(rank_targets: Dictionary, unlock_data_array: Array, max_ticks: int, track_purchases: bool = false) -> Dictionary:
	"""Optimized pure simulation function with struct-based unlocks"""
	var currency: float = 0.0
	var stock: float = 0.0
	var wood: float = 0.0

	# Purchase tracking (only enabled for top results)
	var purchases: Array[Dictionary] = []

	# GLOBAL STRUCT CACHE - Build once, clone per simulation
	# This avoids rebuilding cost/effect tables for every simulation
	var unlocks: Array[Dictionary] = []
	var unlock_by_id: Dictionary = {}

	if not unlock_structs_initialized:
		global_unlock_structs_mutex.lock()
		if not unlock_structs_initialized: # Double-check pattern
			for unlock_data in unlock_data_array:
				var base_mods = unlock_data.base_modifiers

				# Pre-calculate cost table for first 20 ranks (avoid pow() in hot loop)
				var cost_table: Array[float] = []
				if unlock_data.is_scaling:
					# Use cost_ladder if defined, otherwise use exponential scaling
					if unlock_data.has("cost_ladder") and unlock_data.cost_ladder.size() > 0:
						# Use fixed cost ladder
						for cost in unlock_data.cost_ladder:
							cost_table.append(float(cost))
						# Fill remaining slots with last cost for safety
						while cost_table.size() < 21:
							cost_table.append(cost_table[cost_table.size() - 1])
					else:
						# Fallback to exponential scaling
						var base_cost_float = float(unlock_data.base_cost)
						var mult = unlock_data.cost_scaling_multiplier
						for r in range(21): # Pre-calc ranks 0-20
							cost_table.append(base_cost_float * pow(mult, r))
				else:
					cost_table.append(float(unlock_data.base_cost))

				# Pre-calculate effect scale factors for first 20 ranks
				var effect_scale_table: Array[float] = []
				var effect_mult = unlock_data.effect_scaling_multiplier
				for r in range(21):
					if r == 0:
						effect_scale_table.append(0.0) # No effect at rank 0
					elif r == 1:
						effect_scale_table.append(1.0) # Base effect at rank 1
					else:
						effect_scale_table.append(pow(effect_mult, r - 1))

				var unlock_struct = {
					"id": unlock_data.unlock_id,
					"name": unlock_data.unlock_name,
					"base_cost": unlock_data.base_cost,
					"is_scaling": unlock_data.is_scaling,
					"max_rank": unlock_data.max_rank,
					"cost_multiplier": unlock_data.cost_scaling_multiplier,
					"effect_multiplier": unlock_data.effect_scaling_multiplier,
					"base_mods": base_mods,
					"cost_table": cost_table, # Pre-calculated costs
					"effect_scale_table": effect_scale_table, # Pre-calculated effect scales
					# Pre-calculate whether this unlock affects each modifier (avoids string lookups)
					"affects_sale_price": base_mods.has("sale_price_modifier"),
					"affects_efficiency": base_mods.has("efficiency_modifier"),
					"affects_wood_per_click": base_mods.has("wood_per_click_modifier"),
					"affects_purchase_rate": base_mods.has("purchase_rate_modifier"),
					"affects_autowood": base_mods.has("autowood_modifier"),
					"is_multicraft": base_mods.has("multicraft_increase_modifier"),
					# Cache base modifier values to avoid dictionary lookups
					"sale_price_value": base_mods.get("sale_price_modifier", 1.0),
					"efficiency_value": base_mods.get("efficiency_modifier", 1.0),
					"wood_per_click_value": base_mods.get("wood_per_click_modifier", 1.0),
					"purchase_rate_value": base_mods.get("purchase_rate_modifier", 1.0),
					"autowood_value": base_mods.get("autowood_modifier", 0.0)
				}
				global_unlock_structs.append(unlock_struct)

			unlock_structs_initialized = true
		global_unlock_structs_mutex.unlock()

	# Clone structs for this simulation (fast shallow copy)
	for template in global_unlock_structs:
		var unlock = template.duplicate(false) # Shallow copy
		unlock.current_rank = 0 # Reset rank for this simulation
		unlocks.append(unlock)
		unlock_by_id[unlock.id] = unlock

	var ticks: int = 0
	# NOTE: purchases are only appended when track_purchases is enabled;
	# unconditional tracking slows down the simulation

	# Track how many targets still need to be reached - OPTIMIZATION
	var targets_remaining: int = 0
	var current_ranks: Dictionary = {}
	var active_unlock_ids: Array = [] # Only check unlocks that haven't reached target yet
	for unlock_id in rank_targets.keys():
		current_ranks[unlock_id] = 0
		targets_remaining += rank_targets[unlock_id]
		active_unlock_ids.append(unlock_id)

	# Modifiers as individual variables for faster access - MAJOR OPTIMIZATION
	var sale_price_mod: float = 1.0
	var efficiency_mod: float = 1.0
	var wood_per_click_mod: float = 1.0
	var purchase_rate_mod: float = 1.0
	var autowood_mod: float = 0.0
	var multicraft_rank: int = 0

	var wholesale_unlocked: bool = false

	# Pre-calculate constants
	var wood_per_click_base: float = Global.wood_per_click
	var cost_per_whittle: float = Global.cost_per_whittle
	var base_sale_price: float = Global.base_sale_price
	var base_purchase_rate: float = Global.base_purchase_rate
	var wholesale_id: int = Global.wholesale_unlock_id
	var wholesale_size: float = Global.wholesale_bundle_size
	var wholesale_mult: float = Global.wholesale_discount_multiplier

	# CACHE LOOKUP: Try to load from cached intermediate state
	# NOTE: Disable cache when tracking purchases to ensure all purchases are recorded
	var cached_state = null
	if not track_purchases:
		cached_state = try_load_best_prefix_from_cache(rank_targets)

	if cached_state != null:
		# Restore full state from cache
		ticks = cached_state.ticks
		currency = cached_state.currency
		stock = cached_state.stock
		wood = cached_state.wood

		# Restore modifiers
		sale_price_mod = cached_state.modifiers.sale_price_mod
		efficiency_mod = cached_state.modifiers.efficiency_mod
		wood_per_click_mod = cached_state.modifiers.wood_per_click_mod
		purchase_rate_mod = cached_state.modifiers.purchase_rate_mod
		autowood_mod = cached_state.modifiers.autowood_mod
		multicraft_rank = cached_state.modifiers.multicraft_rank
		wholesale_unlocked = cached_state.modifiers.wholesale_unlocked

		# Restore unlock ranks
		for unlock_id in cached_state.current_ranks.keys():
			if unlock_by_id.has(unlock_id):
				unlock_by_id[unlock_id].current_rank = cached_state.current_ranks[unlock_id]
				current_ranks[unlock_id] = cached_state.current_ranks[unlock_id]

		# Recalculate targets_remaining and active_unlock_ids
		targets_remaining = 0
		active_unlock_ids.clear()
		for unlock_id in rank_targets.keys():
			if not current_ranks.has(unlock_id):
				current_ranks[unlock_id] = 0
			var remaining = rank_targets[unlock_id] - current_ranks[unlock_id]
			if remaining > 0:
				targets_remaining += remaining
				active_unlock_ids.append(unlock_id)

	# PRE-CALCULATE all next costs to avoid repeated lookups in main loop
	var next_costs: Array[float] = []
	next_costs.resize(active_unlock_ids.size())

	for i in range(active_unlock_ids.size()):
		var unlock = unlock_by_id[active_unlock_ids[i]]
		var current_rank: int = unlock.current_rank
		if current_rank < unlock.cost_table.size():
			next_costs[i] = unlock.cost_table[current_rank]
		else:
			next_costs[i] = unlock.base_cost * pow(unlock.cost_multiplier, current_rank)

	while ticks < max_ticks:
		# Find cheapest affordable unlock using pre-calculated costs
		var cheapest_unlock_id: int = -1
		var cheapest_cost: float = INF
		var cheapest_unlock = null
		var cheapest_index: int = -1

		if targets_remaining > 0:
			for i in range(active_unlock_ids.size()):
				if next_costs[i] < cheapest_cost and currency >= next_costs[i]:
					cheapest_cost = next_costs[i]
					cheapest_unlock_id = active_unlock_ids[i]
					cheapest_unlock = unlock_by_id[cheapest_unlock_id]
					cheapest_index = i

		# If we can't afford anything and all targets are met, skip to earning 1M
		if cheapest_unlock == null and targets_remaining == 0:
			if currency >= 1000000.0:
				break
			# Skip ahead: calculate ticks needed to reach 1M currency
			# Use current production rate to estimate
			var currency_needed = 1000000.0 - currency
			var price_per_item = base_sale_price * sale_price_mod
			var items_per_tick = max(1.0, floor(base_purchase_rate * purchase_rate_mod))
			var revenue_per_tick = items_per_tick * price_per_item

			if revenue_per_tick > 0:
				var ticks_needed = int(ceil(currency_needed / revenue_per_tick))
				ticks += ticks_needed
				currency += revenue_per_tick * ticks_needed
				break

		# Purchase the cheapest unlock if found
		if cheapest_unlock != null:
			currency -= cheapest_cost
			cheapest_unlock.current_rank += 1
			current_ranks[cheapest_unlock_id] += 1
			targets_remaining -= 1

			# Update wholesale cache
			if cheapest_unlock_id == wholesale_id:
				wholesale_unlocked = true

			# OPTIMIZED modifier update - use ratio instead of recalculating from scratch
			var rank: int = cheapest_unlock.current_rank
			var prev_rank: int = rank - 1

			if cheapest_unlock.is_multicraft:
				multicraft_rank = rank

			# Get scale factors from pre-calculated tables
			var old_scale: float = cheapest_unlock.effect_scale_table[prev_rank] if prev_rank < cheapest_unlock.effect_scale_table.size() else 0.0
			var new_scale: float = cheapest_unlock.effect_scale_table[rank] if rank < cheapest_unlock.effect_scale_table.size() else pow(cheapest_unlock.effect_multiplier, rank - 1)

			# Apply incremental changes using ratio
			if cheapest_unlock.affects_sale_price:
				var base_bonus: float = cheapest_unlock.sale_price_value - 1.0
				var old_mult: float = 1.0 + base_bonus * old_scale
				var new_mult: float = 1.0 + base_bonus * new_scale
				sale_price_mod = sale_price_mod * (new_mult / old_mult)

			if cheapest_unlock.affects_efficiency:
				var base_bonus: float = cheapest_unlock.efficiency_value - 1.0
				var old_mult: float = 1.0 + base_bonus * old_scale
				var new_mult: float = 1.0 + base_bonus * new_scale
				efficiency_mod = efficiency_mod * (new_mult / old_mult)

			if cheapest_unlock.affects_wood_per_click:
				var base_bonus: float = cheapest_unlock.wood_per_click_value - 1.0
				var old_mult: float = 1.0 + base_bonus * old_scale
				var new_mult: float = 1.0 + base_bonus * new_scale
				wood_per_click_mod = wood_per_click_mod * (new_mult / old_mult)

			if cheapest_unlock.affects_purchase_rate:
				var base_bonus: float = cheapest_unlock.purchase_rate_value - 1.0
				var old_mult: float = 1.0 + base_bonus * old_scale
				var new_mult: float = 1.0 + base_bonus * new_scale
				purchase_rate_mod = purchase_rate_mod * (new_mult / old_mult)

			if cheapest_unlock.affects_autowood:
				autowood_mod = autowood_mod - cheapest_unlock.autowood_value * prev_rank + cheapest_unlock.autowood_value * rank

			# Track purchase if enabled
			if track_purchases:
				if purchases.size() == 0:
					print("DEBUG: First purchase being tracked!")
				purchases.append({
					"unlock_id": cheapest_unlock_id,
					"unlock_name": cheapest_unlock.name,
					"rank": rank,
					"cost": cheapest_cost,
					"tick": ticks,
					"currency_after": currency
				})

			# Update next cost for this unlock or remove from active list
			if current_ranks[cheapest_unlock_id] >= rank_targets[cheapest_unlock_id]:
				# Target reached - swap with last element and shrink array
				var last_idx = active_unlock_ids.size() - 1
				if cheapest_index != last_idx:
					active_unlock_ids[cheapest_index] = active_unlock_ids[last_idx]
					next_costs[cheapest_index] = next_costs[last_idx]
				active_unlock_ids.resize(last_idx)
				next_costs.resize(last_idx)
			else:
				# Update cost for next rank
				var new_rank = cheapest_unlock.current_rank
				if new_rank < cheapest_unlock.cost_table.size():
					next_costs[cheapest_index] = cheapest_unlock.cost_table[new_rank]
				else:
					next_costs[cheapest_index] = cheapest_unlock.base_cost * pow(cheapest_unlock.cost_multiplier, new_rank)

		# CACHE INSERTION: Cache this state if valuable
		if should_cache_state(current_ranks, targets_remaining):
			var cache_key = get_cache_key(current_ranks)

			cache_mutex.lock()
			if not simulation_cache.has(cache_key):
				simulation_cache[cache_key] = {
					"cache_key": cache_key,
					"ticks": ticks,
					"currency": currency,
					"stock": stock,
					"wood": wood,
					"current_ranks": current_ranks.duplicate(),
					"modifiers": {
						"sale_price_mod": sale_price_mod,
						"efficiency_mod": efficiency_mod,
						"wood_per_click_mod": wood_per_click_mod,
						"purchase_rate_mod": purchase_rate_mod,
						"autowood_mod": autowood_mod,
						"multicraft_rank": multicraft_rank,
						"wholesale_unlocked": wholesale_unlocked
					}
				}
			cache_mutex.unlock()

		# Simulate one tick - HEAVILY OPTIMIZED

		# 1. Generate wood
		var wood_per_click_modified = wood_per_click_base * wood_per_click_mod

		# Manual clicks based on tick range (pre-calculate to avoid repeated conditions)
		var manual_clicks: float = 1.0 if ticks < 120 else (0.5 if ticks < 300 else (0.25 if (ticks < 600 and autowood_mod < 0.2) else 0.0))

		# Total wood generation
		var wood_gen: float = manual_clicks * wood_per_click_modified
		if autowood_mod > 0.0:
			wood_gen += max(wood_per_click_modified * autowood_mod, 1.0)
		wood += wood_gen

		# 2. Whittle wood into stock - MATCHES tick_process.gd:19-32
		# Base whittling action (always happens once)
		var multicraft_actions = 1 + multicraft_rank # 1 base + multicraft ranks

		# Each whittle action: items_produced_per_tick = cost_per_whittle * efficiency_modifier
		var items_per_whittle = cost_per_whittle * efficiency_mod

		for action in range(multicraft_actions):
			if wood >= 1: # Need at least 1 wood to whittle
				# How much wood needed for this whittle (matches tick_process.gd:63-65)
				var wood_needed = ceil(items_per_whittle)
				var wood_to_use = min(wood, wood_needed)
				var items_produced = wood_to_use # 1 wood = 1 item always

				wood -= wood_to_use
				stock += items_produced
			else:
				break # Not enough wood for more whittle actions

		# 3. Sell stock for currency - MATCHES tick_process.gd:34-58
		var price_per_item = base_sale_price * sale_price_mod

		# 3a. Wholesale selling (if unlocked) - matches tick_process.gd:36-42
		# Sell ALL possible 100-item bundles at 1.2x price
		if wholesale_unlocked:
			while stock >= wholesale_size:
				stock -= wholesale_size
				currency += wholesale_size * price_per_item * wholesale_mult

		# 3b. Regular selling - matches tick_process.gd:45-58
		if stock > 0:
			var purchase_rate = base_purchase_rate * purchase_rate_mod
			var max_stock_to_sell = floor(purchase_rate)
			# Always sell at least 1, up to the max
			var actual_stock_to_sell = min(stock, max(1.0, max_stock_to_sell))
			stock -= actual_stock_to_sell
			currency += actual_stock_to_sell * price_per_item

		ticks += 1

	var success = currency >= 1000000.0

	var result = {
		"rank_targets": rank_targets,
		"success": success,
		"ticks": ticks if success else -1,
		"final_currency": currency,
		"time_formatted": format_time(ticks) if success else "Failed"
	}

	# Include purchase timeline if tracking was enabled
	if track_purchases:
		result["purchases"] = purchases
		print("DEBUG: track_purchases=true, purchases.size()=%d" % purchases.size())

	return result

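# Fast-forward sketch for the skip-ahead branch in simulate_rank_combination_pure()
# (illustrative numbers, not taken from the game data): with all targets met,
# 400,000 currency on hand and revenue_per_tick = 150.0, the branch adds
# ceil(600000 / 150) = 4000 ticks in a single step instead of simulating each
# tick, then breaks out of the loop.
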
func format_time(ticks: int) -> String:
	var seconds = ticks
	var minutes = seconds / 60
	var hours = minutes / 60

	if hours > 0:
		return "%dh %dm %ds" % [hours, minutes % 60, seconds % 60]
	elif minutes > 0:
		return "%dm %ds" % [minutes, seconds % 60]
	else:
		return "%ds" % seconds

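# Worked example: format_time(3725) returns "1h 2m 5s" - 3725 / 60 = 62 minutes,
# 62 / 60 = 1 hour, leaving 2 minutes and 5 seconds. The function treats one
# tick as one second.
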
func generate_all_combinations(unlimited_scaling_cap: int = 5) -> Array[Dictionary]:
	"""Generate combinations for ALL unlocks dynamically, respecting max_ranks from resource file

	Args:
		unlimited_scaling_cap: Maximum rank to test for unlocks with unlimited scaling (default: 5)
			Lower values = faster testing, higher = more comprehensive
			Cap=3: ~13K combos (~28 sec) | Cap=5: ~47K combos (~93 sec)
			Cap=7: ~111K combos (~3.7min) | Cap=10: ~287K combos (~9.6min)
	"""
	var combinations: Array[Dictionary] = []

	# Build constraint list from resource file
	var unlock_constraints = []
	for unlock in unlock_collection.unlocks:
		var max_rank: int
		if unlock.max_rank > 0:
			max_rank = unlock.max_rank
		elif not unlock.is_scaling:
			max_rank = 1 # One-shot unlocks
		else:
			max_rank = unlimited_scaling_cap # Configurable cap for unlimited scaling

		unlock_constraints.append({
			"id": unlock.unlock_id,
			"name": unlock.unlock_name,
			"max_rank": max_rank
		})

	print("\n=== Generating Combinations ===")
	print("Reading from resource file: %d unlocks" % unlock_constraints.size())
	for c in unlock_constraints:
		print(" - %s (ID %d): 0-%d ranks" % [c.name, c.id, c.max_rank])

	# Recursive generation
	_generate_combinations_recursive(unlock_constraints, 0, {}, combinations)

	print("Generated %d total combinations" % combinations.size())
	return combinations

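# The number of generated combinations is the product of (max_rank + 1) over
# all unlocks, minus 1 for the all-zeros case the recursion skips. For example,
# three hypothetical unlocks capped at ranks 5, 3 and 1 would yield
# 6 * 4 * 2 - 1 = 47 combinations; the ~13K/~47K figures quoted in the
# docstring above depend entirely on the unlocks defined in UnlockData.tres.
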
func _generate_combinations_recursive(constraints: Array, index: int, current: Dictionary, output: Array):
	"""Recursively generate all valid combinations"""
	if index >= constraints.size():
		# Skip all-zeros combination
		if current.size() > 0:
			output.append(current.duplicate())
		return

	var constraint = constraints[index]
	for rank in range(constraint.max_rank + 1):
		if rank > 0:
			current[constraint.id] = rank

		_generate_combinations_recursive(constraints, index + 1, current, output)

		if rank > 0:
			current.erase(constraint.id)

func serialize_unlock_data() -> Array:
	"""Convert unlock collection to serializable data for threads"""
	var unlock_data = []
	for unlock in unlock_collection.unlocks:
		unlock_data.append({
			"unlock_id": unlock.unlock_id,
			"unlock_name": unlock.unlock_name,
			"base_cost": unlock.base_cost,
			"is_scaling": unlock.is_scaling,
			"max_rank": unlock.max_rank,
			"cost_scaling_multiplier": unlock.cost_scaling_multiplier,
			"effect_scaling_multiplier": unlock.effect_scaling_multiplier,
			"cost_ladder": unlock.cost_ladder.duplicate() if unlock.cost_ladder.size() > 0 else [],
			"base_modifiers": unlock.base_modifiers.duplicate()
		})
	return unlock_data

func run_comprehensive_test():
	"""Test all combinations dynamically generated from resource file"""
	print("\n=== Available Unlocks ===")
	for unlock in unlock_collection.unlocks:
		var max_rank_str = str(unlock.max_rank) if unlock.max_rank > 0 else "unlimited"
		print("ID: %d | %s | Base Cost: %d | Scaling: %s | Max Rank: %s" % [
			unlock.unlock_id,
			unlock.unlock_name,
			unlock.base_cost,
			"Yes" if unlock.is_scaling else "No",
			max_rank_str
		])
		print(" Modifiers: ", unlock.base_modifiers)

	print("\n=== Global Constants ===")
	print("Base Sale Price: %s" % Global.base_sale_price)
	print("Base Purchase Rate: %s" % Global.base_purchase_rate)
	print("Cost Per Whittle: %s" % Global.cost_per_whittle)

	# Serialize unlock data for threads
	var unlock_data = serialize_unlock_data()

	# CACHE WARMUP: Pre-populate cache with common single-unlock states
	print("\n=== Cache Warmup ===")
	print("Pre-populating cache with common prefixes...")
	for unlock in unlock_collection.unlocks:
		var max_warmup_rank = 3
		if unlock.max_rank > 0:
			max_warmup_rank = min(unlock.max_rank, 3)

		for rank in range(1, max_warmup_rank + 1):
			var warmup_target = {unlock.unlock_id: rank}
			simulate_rank_combination_pure(warmup_target, unlock_data, 1000000)

	print("Cache warmup complete. Cache size: %d entries" % simulation_cache.size())

	# Generate all combinations (configurable cap for unlimited scaling)
	var unlimited_cap = 5 # Adjust this to test more/fewer ranks: 3=fast, 5=balanced, 7+=comprehensive
	print("\n=== Generation Settings ===")
	print("Unlimited scaling cap: %d ranks" % unlimited_cap)
	var combinations = generate_all_combinations(unlimited_cap)
	total_combinations = combinations.size()
	print("\n=== Testing %d Combinations ===" % total_combinations)

	# Fill task queue
	task_queue.clear()
	for combo in combinations:
		task_queue.append({
			"combo": combo,
			"unlock_data": unlock_data
		})

	# Reset counters
	completed_count = 0
	all_results.clear()
	threads_done = false
	start_time = Time.get_ticks_msec()
	last_progress_time = start_time
	monitoring_active = true

	# Create and start threads
	print("Starting %d worker threads..." % num_threads)
	for i in range(num_threads):
		var thread = Thread.new()
		thread.start(worker_thread.bind(i))
		threads.append(thread)

	print("All threads started, processing...")

func finish_processing():
	"""Called when all processing is complete"""
	print("\nAll combinations complete! Waiting for threads to finish...")

	# Wait for all threads to finish
	for thread in threads:
		thread.wait_to_finish()
	threads.clear()
	threads_done = true

	print("All threads finished. Processing results...")

	var total_time = (Time.get_ticks_msec() - start_time) / 1000.0

	# SAFETY CHECK: Verify result count matches
	results_mutex.lock()
	var actual_results = all_results.size()
	results_mutex.unlock()

	if actual_results != total_combinations:
		print("WARNING: Result count mismatch! Expected %d, got %d" % [total_combinations, actual_results])
		print("This indicates a threading issue where some results weren't flushed")

	# Print results
	print("\n=== RESULTS ===")
	print("Total time: %.1f seconds" % total_time)
	print("Total combinations tested: %d (expected %d)" % [actual_results, total_combinations])

	# Cache statistics
	var total_cache_checks = cache_hits + cache_misses
	var cache_hit_rate = (float(cache_hits) / total_cache_checks * 100.0) if total_cache_checks > 0 else 0.0
	cache_mutex.lock()
	var cache_size = simulation_cache.size()
	cache_mutex.unlock()
	print("\n=== CACHE STATISTICS ===")
	print("Cache hits: %d" % cache_hits)
	print("Cache misses: %d" % cache_misses)
	print("Hit rate: %.1f%%" % cache_hit_rate)
	print("Cache entries stored: %d" % cache_size)

	var successful = all_results.filter(func(r): return r.success)
	print("Successful strategies: %d" % successful.size())

	# Update UI status
	status_label.text = "Status: Complete!"
	progress_label.text = "Progress: 100%% (%d/%d)" % [all_results.size(), total_combinations]
	progress_bar.value = 1.0
	eta_label.text = "Total Time: %.1f seconds" % total_time

	# Build results text for UI
	var results_text = "[b]SIMULATION COMPLETE[/b]\n\n"
	results_text += "[color=green]Total time: %.1f seconds[/color]\n" % total_time
	results_text += "Combinations tested: %d\n" % all_results.size()
	results_text += "Successful strategies: %d\n\n" % successful.size()

	results_text += "[b]Cache Performance:[/b]\n"
	results_text += " Hits: %d\n" % cache_hits
	results_text += " Misses: %d\n" % cache_misses
	results_text += " [color=cyan]Hit Rate: %.1f%%[/color]\n" % cache_hit_rate
	results_text += " Entries: %d\n\n" % cache_size

	if successful.size() > 0:
		# Sort by ticks (fastest first)
		successful.sort_custom(func(a, b): return a.ticks < b.ticks)

		# Re-simulate the top 10 with detailed purchase tracking
		print("\n=== RE-SIMULATING TOP 10 WITH PURCHASE TRACKING ===")
		var unlock_data = serialize_unlock_data()
		var top_10_detailed: Array = []

		for i in range(min(10, successful.size())):
			var result = successful[i]
			print("Re-simulating #%d with track_purchases=true..." % (i + 1))
			var detailed_result = simulate_rank_combination_pure(result.rank_targets, unlock_data, 1000000, true)
			print(" Result has purchases key: %s" % detailed_result.has("purchases"))
			if detailed_result.has("purchases"):
				print(" Purchases array size: %d" % detailed_result.purchases.size())
			top_10_detailed.append(detailed_result)

		print("\n=== TOP 10 FASTEST STRATEGIES (WITH PURCHASE TIMELINE) ===")
		results_text += "[b]TOP 10 FASTEST STRATEGIES:[/b]\n\n"

		for i in range(top_10_detailed.size()):
			var result = top_10_detailed[i]
			print("\n#%d: %s (%d ticks)" % [i + 1, result.time_formatted, result.ticks])

			# Format ranks with unlock names
			var rank_display = []
			for unlock_id in result.rank_targets.keys():
				var unlock_name = get_unlock_name_by_id(unlock_id)
				var ranks = result.rank_targets[unlock_id]
				rank_display.append("%s: %d" % [unlock_name, ranks])
			print("Target Ranks: %s" % ", ".join(rank_display))

			# Add to UI
			results_text += "[color=yellow]#%d: %s (%d ticks)[/color]\n" % [i + 1, result.time_formatted, result.ticks]
			results_text += " Ranks: %s\n" % ", ".join(rank_display)
			results_text += " Currency: %.0f\n" % result.final_currency

			# Add purchase timeline
			if result.has("purchases") and result.purchases.size() > 0:
				print("\nPurchase Timeline:")
				results_text += " [b]Purchase Timeline:[/b]\n"
				for purchase in result.purchases:
					var time_str = format_time(purchase.tick)
					print(" %s: %s Rank %d - Cost: %d¥" % [
						time_str, purchase.unlock_name, purchase.rank, purchase.cost
					])
					results_text += " • %s [color=cyan]%s Rank %d[/color] - %d¥\n" % [
						time_str, purchase.unlock_name, purchase.rank, purchase.cost
					]
			results_text += "\n"
	else:
		print("\nNo successful strategies found!")
		results_text += "[color=red]No successful strategies found![/color]\n"

	# Update results UI
	results_label.text = results_text

func get_unlock_name_by_id(unlock_id: int) -> String:
	"""Helper function to get unlock name by ID"""
	for unlock in unlock_collection.unlocks:
		if unlock.unlock_id == unlock_id:
			return unlock.unlock_name
	return "Unknown"

func _exit_tree():
	# Clean up threads
	monitoring_active = false
	for thread in threads:
		if thread.is_alive():
			thread.wait_to_finish()