Ordinal Equilibria: Condorcet Consistency of Approval Voting (4.1, C.1)

[1]:
import random
import pandas as pd
from poisson_approval import *
[2]:
N_SAMPLES = 10000
[3]:
def has_an_ordinal_equilibrium(profile):
    """Whether the profile admits at least one ordinal equilibrium."""
    return len(profile.analyzed_strategies_ordinal.equilibria) > 0
[4]:
def has_an_ordinal_equilibrium_not_electing_the_cw(profile):
    """Whether some ordinal equilibrium elects a candidate other than a Condorcet winner."""
    return not profile.analyzed_strategies_ordinal.winners_at_equilibrium.issubset(profile.condorcet_winners)
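
As an illustration of what these two tests compute, here is a minimal sketch on a single hand-picked profile. The shares below are arbitrary (not taken from the paper), and the ProfileOrdinal constructor call is an assumption based on the poisson_approval tutorial; the attributes it relies on are exactly those used above.

profile = ProfileOrdinal({'abc': 0.4, 'bac': 0.35, 'cab': 0.25})  # arbitrary example shares
print(profile.condorcet_winners)                                  # Condorcet winner(s) of this profile
print(has_an_ordinal_equilibrium(profile))                        # at least one ordinal equilibrium?
print(has_an_ordinal_equilibrium_not_electing_the_cw(profile))    # one that misses the Condorcet winner?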

By Number of Rankings

[5]:
table = pd.DataFrame(index=range(1, 7), columns=[
    'P(∃ ord eq | ∃ Cond)',
    'P(∃ ord eq not Cond | ∃ Cond)',
    'P(∃ Cond)'
])
table.index.name = "Number of rankings"
for n_rankings in table.index:
    def rand_profile():
        # Draw a random subset of `n_rankings` rankings, then a random ordinal
        # profile supported on that subset.
        rankings = random.sample(RANKINGS, n_rankings)
        return RandProfileOrdinalUniform(orders=rankings)()
    # Conditionally on the existence of a Condorcet winner: probability that an
    # ordinal equilibrium exists, and that one exists which does not elect the CW.
    p1, p2 = probability(
        factory=rand_profile,
        n_samples=N_SAMPLES,
        test=(has_an_ordinal_equilibrium, has_an_ordinal_equilibrium_not_electing_the_cw),
        conditional_on=is_condorcet
    )
    table.loc[n_rankings, 'P(∃ ord eq | ∃ Cond)'] = p1
    table.loc[n_rankings, 'P(∃ ord eq not Cond | ∃ Cond)'] = p2
    # Probability that a Condorcet winner exists at all.
    p3 = probability(
        factory=rand_profile,
        n_samples=N_SAMPLES,
        test=is_condorcet
    )
    table.loc[n_rankings, 'P(∃ Cond)'] = p3
table
[5]:
Number of rankings  P(∃ ord eq | ∃ Cond)  P(∃ ord eq not Cond | ∃ Cond)  P(∃ Cond)
1                   1                     0                              1
2                   0.4083                0                              1
3                   0.2408                0.0378                         0.9721
4                   0.2742                0.0175                         0.9493
5                   0.2687                0.0031                         0.9404
6                   0.2941                0.0012                         0.9367

By Subset of Rankings

[6]:
table = pd.DataFrame()
table.index.name = 'Rankings'
for rankings in SETS_OF_RANKINGS_UP_TO_RELABELLING:
    # Random profiles whose rankings are drawn from exactly this subset.
    rand_profile = RandProfileOrdinalUniform(orders=rankings)
    p = probability(
        factory=rand_profile,
        n_samples=N_SAMPLES,
        test=has_an_ordinal_equilibrium_not_electing_the_cw,
        conditional_on=is_condorcet
    )
    table.loc[str(rankings), 'P(∃ ord eq not Cond | ∃ Cond)'] = p
table.sort_values(by='P(∃ ord eq not Cond | ∃ Cond)', ascending=False, inplace=True)
table
[6]:
P(∃ ord eq not Cond | ∃ Cond)
Rankings
('abc', 'bac', 'cab') 0.1299
('abc', 'acb', 'bac', 'cab') 0.0590
('abc', 'acb', 'bca', 'cba') 0.0094
('abc', 'acb', 'bac', 'bca', 'cab') 0.0046
('abc', 'acb', 'bac', 'cba') 0.0038
('abc', 'acb', 'bac', 'bca', 'cab', 'cba') 0.0012
('abc',) 0.0000
('abc', 'acb') 0.0000
('abc', 'bac') 0.0000
('abc', 'bca') 0.0000
('abc', 'cba') 0.0000
('abc', 'acb', 'bac') 0.0000
('abc', 'acb', 'bca') 0.0000
('abc', 'bca', 'cab') 0.0000
('abc', 'acb', 'bac', 'bca') 0.0000

Focus on Single-Peaked Profiles

Over the whole single-peaked domain, i.e. the four rankings that are single-peaked with respect to the axis b-a-c (abc, acb, bac, cab):

[7]:
table.loc["('abc', 'acb', 'bac', 'cab')", 'P(∃ ord eq not Cond | ∃ Cond)']
[7]:
0.059

When only 3 rankings are present:

[8]:
table.loc["('abc', 'bac', 'cab')", 'P(∃ ord eq not Cond | ∃ Cond)']
[8]:
0.1299
[9]:
table.loc["('abc', 'acb', 'bac')", 'P(∃ ord eq not Cond | ∃ Cond)']
[9]:
0.0

N.B.: up to relabeling the candidates, the above cases cover all the single-peaked cases (a quick stand-alone check is sketched below).
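
As a sanity check of this claim, here is a minimal sketch, independent of poisson_approval; the helpers single_peaked_domain and canonical are ad hoc names introduced here. It enumerates the single-peaked domains over {a, b, c}, takes their subsets of 3 or 4 rankings, and reduces them up to relabeling of the candidates.

from itertools import combinations, permutations

CANDIDATES = 'abc'

def single_peaked_domain(axis):
    # Rankings single-peaked w.r.t. `axis` (a string such as 'bac'): going down a
    # ranking, each candidate must extend the contiguous interval of axis
    # positions already covered around the peak.
    domain = set()
    for ranking in permutations(CANDIDATES):
        positions = [axis.index(c) for c in ranking]
        left = right = positions[0]
        ok = True
        for pos in positions[1:]:
            if pos == left - 1:
                left = pos
            elif pos == right + 1:
                right = pos
            else:
                ok = False
                break
        if ok:
            domain.add(''.join(ranking))
    return domain

def canonical(rankings):
    # Canonical representative of a set of rankings, up to relabeling the candidates.
    variants = []
    for sigma in permutations(CANDIDATES):
        relabel = dict(zip(CANDIDATES, sigma))
        variants.append(tuple(sorted(''.join(relabel[c] for c in r) for r in rankings)))
    return min(variants)

cases = set()
for axis in permutations(CANDIDATES):
    domain = single_peaked_domain(''.join(axis))
    for k in (3, 4):
        for subset in combinations(sorted(domain), k):
            cases.add(canonical(subset))
print(sorted(cases))
# -> [('abc', 'acb', 'bac'), ('abc', 'acb', 'bac', 'cab'), ('abc', 'bac', 'cab')],
#    i.e. exactly the three cases examined above.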