// Pseudocode for a simplified Lam-like annealing schedule: read every line step by step, understand it, and then adapt it to your programming language!
// Try to adjust "total_iters" to get the same computational cost as your own implementation of standard, step-wise temperature-decreasing SA.
// Then compare both approaches in terms of the quality of best discovered solutions.

solution LamLikeAnnealing(solution initial_solution, int total_iters) // read below on how to adjust "total_iters"
{
	current_solution = initial_solution
	best_solution = current_solution
	
	// how it works: we keep temperature constant during a "step" which lasts ITERATIONS_PER_STEP iterations.
	// after a "step" completes, we adjust temperature (down or up) based on the acceptance rate of deteriorating moves we have calculated during last "step".
	ITERATIONS_PER_STEP = 100 // during a step, T remains constant, and we measure the ratio of accepted deteriorating moves. If you set ITERATIONS_PER_STEP too low, you will get too much noise in the measured ratio. ITERATIONS_PER_STEP too high will result in reliable ratio, but slow responsiveness in adjusting T. 100 is typically a good tradeoff; you can try 50 for a more responsive temperature adjustment.
	int num_steps = max(2, int(total_iters / ITERATIONS_PER_STEP)) // "total_iters" must be sufficiently high to have sufficiently many steps (num_steps)! usually we need 1000 steps, or 10000, or more... total_iters = ITERATIONS_PER_STEP * num_steps. The max(2, ...) also guarantees num_steps-1 > 0 below, so the "progress" division is always safe.
	
	T = 1.0 // arbitrary initial temperature, will be adjusted (down or up) in the loop below. If you have already implemented any method that adjusts initial T based on the ruggedness of the landscape (like we discussed during the lecture and labs), use it here instead of 1.0!
	
	for (step=0; step<num_steps; step++) // T is constant during a single step
	{
		accepted_bad_moves = 0
		total_bad_moves = 0

		progress = float(step) / float(num_steps-1) // from 0.0 to 1.0. CAREFUL: make sure this is FLOAT division in your language, otherwise progress will be stuck at 0!
		P_target = GetLamTargetProbability(progress)
		if P_target <= 0.001: // last phase of SA: become strictly greedy
			T = 0.0
		
		for (i=0; i<ITERATIONS_PER_STEP; i++) // one step (constant temperature level) of standard SA
		{
			neighbor = GenerateNeighbor(current_solution)
			delta_f = Objective(neighbor) - Objective(current_solution)
			
			if delta_f <= 0: // improving move, always accept
				current_solution = neighbor
				if Objective(current_solution) < Objective(best_solution):
					best_solution = current_solution
			else: // deteriorating move
				total_bad_moves++
				// the "T > 0" guard matters: in the final greedy phase T == 0.0, and evaluating
				// exp(-delta_f / T) would divide by zero. With T == 0 we simply reject every bad move.
				if T > 0 and Random(0.0, 1.0) < exp(-delta_f / T):
					current_solution = neighbor
					accepted_bad_moves++
		}

		if T > 0 // only adjust T (for the next step) if we are not in the final, strictly greedy phase (T=0)
		{
			if (total_bad_moves == 0) // wow, every move in the recent step was improving: free-falling down a steep gradient?
				T = T * 1.5 // bump up temperature to escape (we want thermal equilibrium!), the multiplier can be e.g. 1.2 .. 2.0
			else
			{
				P_current = float(accepted_bad_moves) / float(total_bad_moves) // the actual acceptance rate of deteriorating moves during the recent step. CAREFUL: must be FLOAT division, not integer division!
				error = P_target - P_current
				
				// use a dynamic multiplier based on how big the error is (a proportional controller).
				// if error is large positive (we are rejecting too much and need more acceptances), T goes up - and vice versa.
				adjustment_factor = 1.0 + (error * 0.5) //0.5 can be adjusted: 0.1 -> slower changes of T, 1.0 -> may cause more violent oscillations
				T = T * adjustment_factor // adjust T (down or up!) to make P_current approach P_target
			}
		}
	}

	return best_solution
}

float GetLamTargetProbability(progress): // the magical "Lam Curve": target acceptance probability of deteriorating moves; "progress" runs from 0.0 (start of the run) to 1.0 (end)
{
	PLATEAU = 0.44 // the famous Lam plateau value: the acceptance rate that keeps the search most efficient
	
	if progress < 0.15: // warm-up phase (first 15% of iterations): linear descent from 1.0 down to the plateau
		fraction = progress / 0.15 // how far we are into the warm-up, normalized to 0.0 .. 1.0
		return 1.0 + fraction * (PLATEAU - 1.0)
	
	if progress < 0.65: // middle 50% of iterations: hold the plateau, this is where most useful optimization happens
		return PLATEAU
	
	// final 35% of iterations: linear cool-down from the plateau to 0.0 (strictly greedy at the very end)
	return PLATEAU * (1.0 - (progress - 0.65) / 0.35)
}
