@@ -5,6 +5,7 @@
 require 'socket'
 require 'digest/sha1'
 require 'set'
+require 'hashdiff'
 
 class Synapse::ConfigGenerator
   class Haproxy < BaseGenerator
@@ -801,6 +802,8 @@ class Haproxy < BaseGenerator
     # should be enough for anyone right (famous last words)?
     MAX_SERVER_ID = (2**16 - 1).freeze
 
+    attr_reader :server_id_map, :state_cache
+
     def initialize(opts)
      super(opts)
 
@@ -845,8 +848,11 @@ def initialize(opts)
       @backends_cache = {}
       @watcher_revisions = {}
 
-      @state_file_path = @opts['state_file_path']
-      @state_file_ttl = @opts.fetch('state_file_ttl', DEFAULT_STATE_FILE_TTL).to_i
+      @state_cache = HaproxyState.new(
+        @opts['state_file_path'],
+        @opts.fetch('state_file_ttl', DEFAULT_STATE_FILE_TTL).to_i,
+        self
+      )
 
       # For giving consistent orders, even if they are random
       @server_order_seed = @opts.fetch('server_order_seed', rand(2000)).to_i
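
Note: the two values handed to HaproxyState.new come straight from the generator's opts hash. A minimal sketch of the relevant opts, with an illustrative path and TTL (neither value is part of this commit):

    # Hypothetical generator opts: 'state_file_path' enables the state
    # cache, and 'state_file_ttl' (in seconds) bounds how long a backend
    # that has disappeared from discovery is remembered before expiring.
    haproxy_opts = {
      'state_file_path' => '/var/run/synapse/haproxy_state.json',
      'state_file_ttl'  => 60 * 60 * 24, # one day
    }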
@@ -907,15 +913,26 @@ def update_config(watchers)
       end
     end
 
+    def update_state_file(watchers)
+      @state_cache.update_state_file(watchers)
+    end
+
     # generates a new config based on the state of the watchers
     def generate_config(watchers)
       new_config = generate_base_config
       shared_frontend_lines = generate_shared_frontend
 
       watchers.each do |watcher|
         watcher_config = watcher.config_for_generator[name]
-        @watcher_configs[watcher.name] ||= parse_watcher_config(watcher)
-        next if watcher_config['disabled']
+        next if watcher_config.nil? || watcher_config.empty? || watcher_config['disabled']
+        @watcher_configs[watcher.name] = parse_watcher_config(watcher)
+
+        # if watcher_config has changed, trigger a restart
+        config_diff = HashDiff.diff(@state_cache.config_for_generator(watcher.name), watcher_config)
+        if !config_diff.empty?
+          log.info "synapse: restart required because config_for_generator changed. before: #{@state_cache.config_for_generator(watcher.name)}, after: #{watcher_config}"
+          @restart_required = true
+        end
 
         regenerate = watcher.revision != @watcher_revisions[watcher.name] ||
           @frontends_cache[watcher.name].nil? ||
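
Note: HashDiff.diff (from the hashdiff gem required above) returns an empty array when two hashes match and an array of change tuples otherwise, so any non-empty result marks the cached config as changed. A quick sketch with invented values:

    require 'hashdiff'

    old_config = { 'port' => 3213, 'server_options' => 'check inter 2s' }
    new_config = { 'port' => 3214, 'server_options' => 'check inter 2s' }

    # Identical hashes diff to [], so no restart is triggered.
    HashDiff.diff(old_config, old_config)  # => []
    # A changed value is reported as a ["~", key, old, new] tuple,
    # which is non-empty and would set @restart_required above.
    HashDiff.diff(old_config, new_config)  # => [["~", "port", 3213, 3214]]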
@@ -1051,7 +1068,7 @@ def generate_backend_stanza(watcher, config)
 
       # The ordering here is important. First we add all the backends in the
       # disabled state...
-      seen.fetch(watcher.name, []).each do |backend_name, backend|
+      @state_cache.backends(watcher.name).each do |backend_name, backend|
         backends[backend_name] = backend.merge('enabled' => false)
         # We remember the haproxy_server_id from a previous reload here.
         # Note though that if live servers below define haproxy_server_id
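
Note: the entries returned by @state_cache.backends are filtered so the reserved watcher_config_for_generator section is never mistaken for a backend. A hand-written example of the state file's shape after this change (names and values are illustrative only):

    # Hypothetical parsed state file: one watcher, one remembered backend,
    # plus the reserved section holding the cached generator config.
    {
      "myservice" => {
        "i-abc123_10.0.0.1:3000" => {
          "timestamp"         => 1518000000,
          "haproxy_server_id" => 7,
          "host"              => "10.0.0.1",
          "port"              => 3000,
        },
        "watcher_config_for_generator" => {
          "port"           => 3213,
          "server_options" => "check inter 2s rise 3 fall 2",
        },
      },
    }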
@@ -1308,74 +1325,113 @@ def construct_name(backend)
     ######################################
     # methods for managing the state file
     ######################################
-    def seen
-      # if we don't support the state file, return nothing
-      return {} if @state_file_path.nil?
+    class HaproxyState
+      include Synapse::Logging
 
-      # if we've never needed the backends, now is the time to load them
-      @seen = read_state_file if @seen.nil?
+      # TODO: enable version in the Haproxy Cache File
+      KEY_WATCHER_CONFIG_FOR_GENERATOR = "watcher_config_for_generator"
+      NON_BACKENDS_KEYS = [KEY_WATCHER_CONFIG_FOR_GENERATOR]
 
-      @seen
-    end
+      def initialize(state_file_path, state_file_ttl, haproxy)
+        @state_file_path = state_file_path
+        @state_file_ttl = state_file_ttl
+        @haproxy = haproxy
+      end
 
-    def update_state_file(watchers)
-      # if we don't support the state file, do nothing
-      return if @state_file_path.nil?
-
-      log.info "synapse: writing state file"
-      timestamp = Time.now.to_i
-
-      # Remove stale backends
-      seen.each do |watcher_name, backends|
-        backends.each do |backend_name, backend|
-          ts = backend.fetch('timestamp', 0)
-          delta = (timestamp - ts).abs
-          if delta > @state_file_ttl
-            log.info "synapse: expiring #{backend_name} with age #{delta}"
-            backends.delete(backend_name)
-          end
+      def backends(watcher_name)
+        if seen.key?(watcher_name)
+          seen[watcher_name].select { |section, data| !NON_BACKENDS_KEYS.include?(section) }
+        else
+          {}
         end
       end
 
-      # Remove any services which no longer have any backends
-      seen.reject!{|watcher_name, backends| backends.keys.length == 0}
+      def config_for_generator(watcher_name)
+        cache_config = {}
+        if seen.key?(watcher_name) && seen[watcher_name].key?(KEY_WATCHER_CONFIG_FOR_GENERATOR)
+          cache_config = seen[watcher_name][KEY_WATCHER_CONFIG_FOR_GENERATOR]
+        end
 
-      # Add backends from watchers
-      watchers.each do |watcher|
-        seen[watcher.name] ||= {}
+        cache_config
+      end
 
-        watcher.backends.each do |backend|
-          backend_name = construct_name(backend)
-          data = {
-            'timestamp' => timestamp,
-          }
-          server_id = @server_id_map[watcher.name][backend_name].to_i
-          if server_id && server_id > 0 && server_id <= MAX_SERVER_ID
-            data['haproxy_server_id'] = server_id
+      def update_state_file(watchers)
+        # if we don't support the state file, do nothing
+        return if @state_file_path.nil?
+
+        log.info "synapse: writing state file"
+        timestamp = Time.now.to_i
+
+        # Remove stale backends
+        seen.each do |watcher_name, data|
+          backends(watcher_name).each do |backend_name, backend|
+            ts = backend.fetch('timestamp', 0)
+            delta = (timestamp - ts).abs
+            if delta > @state_file_ttl
+              log.info "synapse: expiring #{backend_name} with age #{delta}"
+              data.delete(backend_name)
+            end
           end
+        end
 
-          seen[watcher.name][backend_name] = data.merge(backend)
+        # Remove any services which no longer have any backends
+        seen.reject!{|watcher_name, data| backends(watcher_name).keys.length == 0}
+
+        # Add backends and config from watchers
+        watchers.each do |watcher|
+          seen[watcher.name] ||= {}
+
+          watcher.backends.each do |backend|
+            backend_name = @haproxy.construct_name(backend)
+            data = {
+              'timestamp' => timestamp,
+            }
+            server_id = @haproxy.server_id_map[watcher.name][backend_name].to_i
+            if server_id && server_id > 0 && server_id <= MAX_SERVER_ID
+              data['haproxy_server_id'] = server_id
+            end
+
+            seen[watcher.name][backend_name] = data.merge(backend)
+          end
+
+          # Add config for generator from watcher
+          if watcher.config_for_generator.key?(@haproxy.name)
+            seen[watcher.name][KEY_WATCHER_CONFIG_FOR_GENERATOR] =
+              watcher.config_for_generator[@haproxy.name]
+          end
         end
+
+        # write the data!
+        write_data_to_state_file(seen)
       end
 
-      # write the data!
-      write_data_to_state_file(seen)
-    end
+      private
 
-    def read_state_file
-      # Some versions of JSON return nil on an empty file ...
-      JSON.load(File.read(@state_file_path)) || {}
-    rescue StandardError => e
-      # It's ok if the state file doesn't exist or contains invalid data
-      # The state file will be rebuilt automatically
-      {}
-    end
+      def seen
+        # if we don't support the state file, return nothing
+        return {} if @state_file_path.nil?
 
-    # we do this atomically so the state file is always consistent
-    def write_data_to_state_file(data)
-      tmp_state_file_path = @state_file_path + ".tmp"
-      File.write(tmp_state_file_path, JSON.pretty_generate(data))
-      FileUtils.mv(tmp_state_file_path, @state_file_path)
+        # if we've never needed the backends, now is the time to load them
+        @seen = read_state_file if @seen.nil?
+
+        @seen
+      end
+
+      def read_state_file
+        # Some versions of JSON return nil on an empty file ...
+        JSON.load(File.read(@state_file_path)) || {}
+      rescue StandardError => e
+        # It's ok if the state file doesn't exist or contains invalid data
+        # The state file will be rebuilt automatically
+        {}
+      end
+
+      # we do this atomically so the state file is always consistent
+      def write_data_to_state_file(data)
+        tmp_state_file_path = @state_file_path + ".tmp"
+        File.write(tmp_state_file_path, JSON.pretty_generate(data))
+        FileUtils.mv(tmp_state_file_path, @state_file_path)
+      end
     end
   end
 end
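
Note: a rough smoke test for the extracted class, using Structs to stand in for the real watcher and generator (only the members HaproxyState actually touches are stubbed; assumes the synapse code is on the load path):

    require 'synapse'

    FakeWatcher = Struct.new(:name, :backends, :config_for_generator)
    FakeHaproxy = Struct.new(:name, :server_id_map) do
      def construct_name(backend)
        "#{backend['name']}_#{backend['host']}:#{backend['port']}"
      end
    end

    haproxy = FakeHaproxy.new('haproxy',
      'myservice' => { 'web1_10.0.0.1:3000' => 7 })
    watcher = FakeWatcher.new(
      'myservice',
      [{ 'name' => 'web1', 'host' => '10.0.0.1', 'port' => 3000 }],
      'haproxy' => { 'port' => 3213 }
    )

    state = Synapse::ConfigGenerator::Haproxy::HaproxyState.new(
      '/tmp/synapse_state.json', 86_400, haproxy)
    state.update_state_file([watcher])
    state.backends('myservice').keys        # => ["web1_10.0.0.1:3000"]
    state.config_for_generator('myservice') # => {"port"=>3213}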