73 | 73 |
74 | 74 | % Worker children get a default 5 second shutdown timeout, so pick a value just |
75 | 75 | % a bit less than that: 4.5 seconds. In couch_replicator_sup our scheduler |
76 | | -% worker doesn't specify the timeout, so it up picks ups the OTP default of 5 |
| 76 | +% worker doesn't specify the timeout, so it picks up the OTP default of 5 |
77 | 77 | % seconds https://www.erlang.org/doc/system/sup_princ.html#child-specification |
78 | 78 | % |
79 | 79 | -define(TERMINATE_SHUTDOWN_TIME, 4500). |
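
For readers unfamiliar with how this plays out in a supervisor, here is a minimal sketch of a map-style child spec; the `id` and `start` values are illustrative, not the actual couch_replicator_sup entry. When `shutdown` is omitted for a worker, OTP waits 5000 ms after sending the shutdown exit signal before brutally killing the child, which is why terminate work above is budgeted at 4500 ms.

```erlang
%% Illustrative child spec only, not the real couch_replicator_sup entry.
%% With no `shutdown` key, a worker child gets the OTP default of 5000 ms:
%% the supervisor sends exit(Child, shutdown), waits that long for the child
%% to finish terminate/2, then falls back to exit(Child, kill).
scheduler_child_spec() ->
    #{
        id => couch_replicator_scheduler,
        start => {couch_replicator_scheduler, start_link, []},
        restart => permanent,
        type => worker
        %% shutdown => 5000  % implicit default for workers
    }.
```
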
@@ -173,9 +173,9 @@ job_proxy_url(_Endpoint) -> |
173 | 173 | null. |
174 | 174 |
175 | 175 | % Health threshold is the minimum amount of time an unhealthy job should run |
176 | | -% crashing before it is considered to be healthy again. HealtThreashold should |
| 176 | +% without crashing before it is considered to be healthy again. Health threshold should
177 | 177 | % not be 0 as jobs could start and immediately crash, and it shouldn't be |
178 | | -% infinity, since then consecutive crashes would accumulate forever even if |
| 178 | +% infinity, since then consecutive crashes would accumulate forever even if |
179 | 179 | % the job is back to normal.
180 | 180 | -spec health_threshold() -> non_neg_integer(). |
181 | 181 | health_threshold() -> |
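
The hunk cuts off before the function body. As a rough sketch of what a threshold accessor like this typically looks like, the value would be read from the replicator config section; the section name, key, and default below are assumptions for illustration, not the module's actual values.

```erlang
%% Hypothetical body, for illustration only; the real implementation and its
%% default live outside this hunk. Assumes the couch config API.
health_threshold() ->
    config:get_integer("replicator", "health_threshold", 120).
```
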
@@ -522,7 +522,7 @@ pending_fold(Job, {Set, Now, Count, HealthThreshold}) -> |
522 | 522 |
523 | 523 | % Replace Job in the accumulator if it has a higher priority (lower priority |
524 | 524 | % value) than the lowest priority there. Job priority is indexed by |
525 | | -% {FairSharePiority, LastStarted} tuples. If the FairSharePriority is the same |
| 525 | +% {FairSharePriority, LastStarted} tuples. If the FairSharePriority is the same |
526 | 526 | % then last started timestamp is used to pick. The goal is to keep up to Count |
527 | 527 | % oldest jobs during the iteration. For example, if there are jobs with these |
528 | 528 | % priorities accumulated so far [5, 7, 11], and the priority of current job is |
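
The selection described in this comment is a bounded "keep the best Count keys seen so far" fold. Below is a simplified sketch of that pattern using gb_sets; the function name and accumulator shape are assumptions, not the module's actual pending_fold internals.

```erlang
%% Simplified sketch of the bounded-accumulator idea, not the real
%% pending_fold/2. Keys are {FairSharePriority, LastStarted}; smaller keys
%% mean higher priority / older jobs, so when the set is already full we
%% evict the largest key if the incoming candidate compares lower.
maybe_keep(Key, {Set, Count}) ->
    case gb_sets:size(Set) < Count of
        true ->
            {gb_sets:add(Key, Set), Count};
        false ->
            {Largest, Rest} = gb_sets:take_largest(Set),
            case Key < Largest of
                true -> {gb_sets:add(Key, Rest), Count};
                false -> {Set, Count}
            end
    end.
```
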
@@ -594,14 +594,13 @@ not_recently_crashed(#job{history = History}, Now, HealthThreshold) -> |
594 | 594 | % and running successfully without crashing for a period of time. That period |
595 | 595 | % of time is the HealthThreshold. |
596 | 596 | % |
597 | | - |
598 | 597 | -spec consecutive_crashes(history(), non_neg_integer()) -> non_neg_integer(). |
599 | 598 | consecutive_crashes(History, HealthThreshold) when is_list(History) -> |
600 | 599 | consecutive_crashes(History, HealthThreshold, 0). |
601 | 600 |
602 | 601 | -spec consecutive_crashes(history(), non_neg_integer(), non_neg_integer()) -> |
603 | 602 | non_neg_integer(). |
604 | | -consecutive_crashes([], _HealthThreashold, Count) -> |
| 603 | +consecutive_crashes([], _HealthThreshold, Count) -> |
605 | 604 | Count; |
606 | 605 | consecutive_crashes( |
607 | 606 | [{{crashed, _}, CrashT}, {_, PrevT} = PrevEvent | Rest], |
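
To make the counting rule concrete, here is a self-contained simplification of the idea, not the module's actual clauses: history is newest first, and a gap between a crash and the preceding event that exceeds HealthThreshold means the job had been running healthily, so counting stops there.

```erlang
%% Simplified sketch only; the real consecutive_crashes/3 has more clauses
%% (e.g. a lone trailing crash, stopped/started events). Counts crashes from
%% the head of a newest-first history until the time gap exceeds Threshold.
count_recent_crashes([{{crashed, _}, CrashT}, {_, PrevT} = Prev | Rest], Threshold, Count) when
    CrashT - PrevT =< Threshold
->
    count_recent_crashes([Prev | Rest], Threshold, Count + 1);
count_recent_crashes(_History, _Threshold, Count) ->
    Count.
```
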
@@ -795,7 +794,7 @@ rotate_jobs(State, ChurnSoFar) -> |
795 | 794 | if |
796 | 795 | SlotsAvailable >= 0 -> |
797 | 796 | % If there are enough SlotsAvailable, reduce StopCount to avoid
798 | | - % unnesessarily stopping jobs. `stop_jobs/3` ignores 0 or negative |
| 797 | + % unnecessarily stopping jobs. `stop_jobs/3` ignores 0 or negative |
799 | 798 | % values so we don't worry about that here. |
800 | 799 | StopCount = lists:min([Pending - SlotsAvailable, Running, Churn]), |
801 | 800 | stop_jobs(StopCount, true, State), |
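
A quick worked example of the min above, with invented numbers: 10 pending jobs, 3 free slots, 8 running jobs, and a churn budget of 5. Only 10 - 3 = 7 running jobs would need to be displaced, and churn caps that at 5.

```erlang
1> lists:min([10 - 3, 8, 5]).
5
```
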
@@ -930,7 +929,7 @@ optimize_int_option({Key, Val}, #rep{options = Options} = Rep) -> |
930 | 929 | % Updater is a separate process. It receives `update_stats` messages and |
931 | 930 | % updates scheduler stats from the scheduler jobs table. Updates are |
932 | 931 | % performed no more frequently than once per ?STATS_UPDATE_WAIT milliseconds. |
933 | | - |
| 932 | +% |
934 | 933 | update_running_jobs_stats(StatsPid) when is_pid(StatsPid) -> |
935 | 934 | StatsPid ! update_stats, |
936 | 935 | ok. |
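
As a rough sketch of the throttling pattern that comment describes, not the module's actual updater loop: handle one `update_stats` request, wait out the interval, and drop anything that queued up meanwhile so refreshes stay spaced at least the interval apart. `UpdateFun`, `WaitMsec`, and the function names below are stand-ins.

```erlang
%% Simplified sketch of a "no more than once per WaitMsec" updater process.
%% UpdateFun stands in for the real scan of the scheduler jobs table;
%% WaitMsec stands in for ?STATS_UPDATE_WAIT.
stats_updater_loop(UpdateFun, WaitMsec) ->
    receive
        update_stats ->
            UpdateFun(),
            timer:sleep(WaitMsec),
            drain_update_requests(),
            stats_updater_loop(UpdateFun, WaitMsec)
    end.

%% Drop update_stats messages that piled up during the wait; a burst of
%% requests within one interval therefore triggers no extra refreshes, and
%% the next genuine request after the wait runs immediately.
drain_update_requests() ->
    receive
        update_stats -> drain_update_requests()
    after 0 -> ok
    end.
```
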