while (1) vs. for (;;): Is there a speed difference?

In Perl, they result in the same opcodes:

$ perl -MO=Concise -e 'for(;;) { print "foo\n" }'
a  <@> leave[1 ref] vKP/REFC ->(end)
1     <0> enter ->2
2     <;> nextstate(main 2 -e:1) v ->3
9     <2> leaveloop vK/2 ->a
3        <{> enterloop(next->8 last->9 redo->4) v ->4
-        <@> lineseq vK ->9
4           <;> nextstate(main 1 -e:1) v ->5
7           <@> print vK ->8
5              <0> pushmark s ->6
6              <$> const[PV "foo\n"] s ->7
8           <0> unstack v ->4
-e syntax OK

$ perl -MO=Concise -e 'while(1) { print "foo\n" }'
a  <@> leave[1 ref] vKP/REFC ->(end)
1     <0> enter ->2
2     <;> nextstate(main 2 -e:1) v ->3
9     <2> leaveloop vK/2 ->a
3        <{> enterloop(next->8 last->9 redo->4) v ->4
-        <@> lineseq vK ->9
4           <;> nextstate(main 1 -e:1) v ->5
7           <@> print vK ->8
5              <0> pushmark s ->6
6              <$> const[PV "foo\n"] s ->7
8           <0> unstack v ->4
-e syntax OK

Likewise in GCC, both loops compile to identical code:

#include <stdio.h>

void t_while() {
    while(1)
        printf("foo\n");
}

void t_for() {
    for(;;)
        printf("foo\n");
}
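
The original doesn't show the exact compiler invocation, but a plain "gcc -S test.c" (which emits the generated assembly instead of an object file) produces a listing along these lines. Note in passing that GCC has rewritten the printf calls as puts, which it does for format strings that are plain text ending in a newline:

$ gcc -S test.c
$ cat test.s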

    .file   "test.c"
    .section    .rodata
.LC0:
    .string "foo"
    .text
.globl t_while
    .type   t_while, @function
t_while:
.LFB2:
    pushq   %rbp
.LCFI0:
    movq    %rsp, %rbp
.LCFI1:
.L2:
    movl    $.LC0, %edi
    call    puts
    jmp .L2
.LFE2:
    .size   t_while, .-t_while
.globl t_for
    .type   t_for, @function
t_for:
.LFB3:
    pushq   %rbp
.LCFI2:
    movq    %rsp, %rbp
.LCFI3:
.L5:
    movl    $.LC0, %edi
    call    puts
    jmp .L5
.LFE3:
    .size   t_for, .-t_for
    .section    .eh_frame,"a",@progbits
.Lframe1:
    .long   .LECIE1-.LSCIE1
.LSCIE1:
    .long   0x0
    .byte   0x1
    .string "zR"
    .uleb128 0x1
    .sleb128 -8
    .byte   0x10
    .uleb128 0x1
    .byte   0x3
    .byte   0xc
    .uleb128 0x7
    .uleb128 0x8
    .byte   0x90
    .uleb128 0x1
    .align 8
.LECIE1:
.LSFDE1:
    .long   .LEFDE1-.LASFDE1
.LASFDE1:
    .long   .LASFDE1-.Lframe1
    .long   .LFB2
    .long   .LFE2-.LFB2
    .uleb128 0x0
    .byte   0x4
    .long   .LCFI0-.LFB2
    .byte   0xe
    .uleb128 0x10
    .byte   0x86
    .uleb128 0x2
    .byte   0x4
    .long   .LCFI1-.LCFI0
    .byte   0xd
    .uleb128 0x6
    .align 8
.LEFDE1:
.LSFDE3:
    .long   .LEFDE3-.LASFDE3
.LASFDE3:
    .long   .LASFDE3-.Lframe1
    .long   .LFB3
    .long   .LFE3-.LFB3
    .uleb128 0x0
    .byte   0x4
    .long   .LCFI2-.LFB3
    .byte   0xe
    .uleb128 0x10
    .byte   0x86
    .uleb128 0x2
    .byte   0x4
    .long   .LCFI3-.LCFI2
    .byte   0xd
    .uleb128 0x6
    .align 8
.LEFDE3:
    .ident  "GCC: (Ubuntu 4.3.3-5ubuntu4) 4.3.3"
    .section    .note.GNU-stack,"",@progbits

So I guess the answer is: they're the same under many compilers. For some other compilers that may not be the case, but chances are the code inside the loop is going to be thousands of times more expensive than the loop construct itself anyway, so who cares?
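
If you want to convince yourself on your own toolchain, here's a rough, unscientific timing sketch (my own illustration, not part of the output above); the volatile accumulator just keeps the optimizer from folding the loops away. On any compiler that treats the two forms identically, the timings should be indistinguishable:

#include <stdio.h>
#include <time.h>

#define ITERS 100000000UL   /* arbitrary iteration count */

static unsigned long spin_while(void) {
    volatile unsigned long acc = 0;  /* volatile: stop the optimizer folding the loop */
    unsigned long i = 0;
    while (1) {
        acc += i++;
        if (i == ITERS) break;
    }
    return acc;
}

static unsigned long spin_for(void) {
    volatile unsigned long acc = 0;
    unsigned long i = 0;
    for (;;) {
        acc += i++;
        if (i == ITERS) break;
    }
    return acc;
}

int main(void) {
    clock_t t0 = clock();
    unsigned long a = spin_while();
    clock_t t1 = clock();
    unsigned long b = spin_for();
    clock_t t2 = clock();

    printf("while(1): sum=%lu  %.3fs\n", a, (double)(t1 - t0) / CLOCKS_PER_SEC);
    printf("for(;;):  sum=%lu  %.3fs\n", b, (double)(t2 - t1) / CLOCKS_PER_SEC);
    return 0;
}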


There's not much reason to prefer one over the other. I do think that while(1) and particularly while(true) are more readable than for(;;), but that's just my preference.
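
One small C-specific caveat to that: true isn't a keyword until C23, so while (true) needs stdbool.h in C99-and-later C (C++ has it built in). Something like:

#include <stdbool.h>   /* true/false are macros in C99..C17, keywords in C23 and C++ */

void spin(void) {
    while (true) {     /* reads the same as while (1) or for (;;) */
        /* ... loop body ... */
        break;         /* break only so this sketch terminates */
    }
}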