What does "immediate mode" mean in OpenGL?
Runnable retained vs. immediate example
Damon has provided the key parts, but newbs like me will be looking for a full runnable example.
main.c
#include <stdio.h>
#include <stdlib.h>
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#define INFOLOG_LEN 512
static const GLuint WIDTH = 512, HEIGHT = 512;
/* Vertex data is passed as input to this shader.
 * ourColor is passed as input to the fragment shader. */
static const GLchar* vertexShaderSource =
    "#version 330 core\n"
    "layout (location = 0) in vec3 position;\n"
    "layout (location = 1) in vec3 color;\n"
    "out vec3 ourColor;\n"
    "void main() {\n"
    "    gl_Position = vec4(position, 1.0f);\n"
    "    ourColor = color;\n"
    "}\n";
static const GLchar* fragmentShaderSource =
    "#version 330 core\n"
    "in vec3 ourColor;\n"
    "out vec4 color;\n"
    "void main() {\n"
    "    color = vec4(ourColor, 1.0f);\n"
    "}\n";
GLfloat vertices[] = {
    /* Positions          Colors */
     0.5f, -0.5f, 0.0f,   1.0f, 0.0f, 0.0f,
    -0.5f, -0.5f, 0.0f,   0.0f, 1.0f, 0.0f,
     0.0f,  0.5f, 0.0f,   0.0f, 0.0f, 1.0f
};
int main(int argc, char **argv) {
    int immediate = (argc > 1) && argv[1][0] == '1';

    /* Used in !immediate only. */
    GLuint vao, vbo;
    GLint shaderProgram;

    glfwInit();
    GLFWwindow* window = glfwCreateWindow(WIDTH, HEIGHT, __FILE__, NULL, NULL);
    glfwMakeContextCurrent(window);
    glewExperimental = GL_TRUE;
    glewInit();
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glViewport(0, 0, WIDTH, HEIGHT);
    if (immediate) {
        float ratio;
        int width, height;
        glfwGetFramebufferSize(window, &width, &height);
        ratio = width / (float) height;
        glClear(GL_COLOR_BUFFER_BIT);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(-ratio, ratio, -1.f, 1.f, 1.f, -1.f);
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();
        glBegin(GL_TRIANGLES);
        glColor3f( 1.0f,  0.0f, 0.0f);
        glVertex3f(-0.5f, -0.5f, 0.0f);
        glColor3f( 0.0f,  1.0f, 0.0f);
        glVertex3f( 0.5f, -0.5f, 0.0f);
        glColor3f( 0.0f,  0.0f, 1.0f);
        glVertex3f( 0.0f,  0.5f, 0.0f);
        glEnd();
    } else {
        /* Build and compile shader program. */

        /* Vertex shader */
        GLint vertexShader = glCreateShader(GL_VERTEX_SHADER);
        glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
        glCompileShader(vertexShader);
        GLint success;
        GLchar infoLog[INFOLOG_LEN];
        glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
        if (!success) {
            glGetShaderInfoLog(vertexShader, INFOLOG_LEN, NULL, infoLog);
            printf("ERROR::SHADER::VERTEX::COMPILATION_FAILED\n%s\n", infoLog);
        }

        /* Fragment shader */
        GLint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
        glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
        glCompileShader(fragmentShader);
        glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
        if (!success) {
            glGetShaderInfoLog(fragmentShader, INFOLOG_LEN, NULL, infoLog);
            printf("ERROR::SHADER::FRAGMENT::COMPILATION_FAILED\n%s\n", infoLog);
        }

        /* Link shaders */
        shaderProgram = glCreateProgram();
        glAttachShader(shaderProgram, vertexShader);
        glAttachShader(shaderProgram, fragmentShader);
        glLinkProgram(shaderProgram);
        glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success);
        if (!success) {
            glGetProgramInfoLog(shaderProgram, INFOLOG_LEN, NULL, infoLog);
            printf("ERROR::SHADER::PROGRAM::LINKING_FAILED\n%s\n", infoLog);
        }
        glDeleteShader(vertexShader);
        glDeleteShader(fragmentShader);

        glGenVertexArrays(1, &vao);
        glGenBuffers(1, &vbo);
        glBindVertexArray(vao);
        glBindBuffer(GL_ARRAY_BUFFER, vbo);
        glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
        /* Position attribute */
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)0);
        glEnableVertexAttribArray(0);
        /* Color attribute */
        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
        glEnableVertexAttribArray(1);
        glBindVertexArray(0);

        glUseProgram(shaderProgram);
        glBindVertexArray(vao);
        glDrawArrays(GL_TRIANGLES, 0, 3);
        glBindVertexArray(0);
    }
    glfwSwapBuffers(window);

    /* Main loop. */
    while (!glfwWindowShouldClose(window)) {
        glfwPollEvents();
    }

    if (!immediate) {
        glDeleteVertexArrays(1, &vao);
        glDeleteBuffers(1, &vbo);
        glDeleteProgram(shaderProgram);
    }
    glfwTerminate();
    return EXIT_SUCCESS;
}
Adapted from Learn OpenGL, my GitHub upstream.
Compile and run on Ubuntu 20.04:
sudo apt install libglew-dev libglfw3-dev
gcc -ggdb3 -O0 -std=c99 -Wall -Wextra -pedantic -o main.out main.c -lGL -lGLEW -lglfw
# Shader
./main.out
# Immediate
./main.out 1
From that we see how, when using shaders:

- The vertex and fragment shader programs are represented as C-style strings containing GLSL code (vertexShaderSource and fragmentShaderSource) inside a regular C program that runs on the CPU.
- This C program makes OpenGL calls which compile those strings into GPU code, e.g.:

    glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
    glCompileShader(fragmentShader);

- The shaders define their expected inputs, and the C program provides them through a pointer to memory to the GPU code. For example, the vertex shader defines its expected inputs as an array of vertex positions and colors:

    "layout (location = 0) in vec3 position;\n"
    "layout (location = 1) in vec3 color;\n"
    "out vec3 ourColor;\n"

  and it also defines one of its outputs, ourColor, as an array of colors, which then becomes an input to the fragment shader:

    static const GLchar* fragmentShaderSource =
        "#version 330 core\n"
        "in vec3 ourColor;\n"

- The C program then provides the array containing the vertex positions and colors from the CPU to the GPU:

    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
In the immediate mode (non-shader) example, however, we see that magic API calls are made which explicitly give positions and colors:
glColor3f( 1.0f, 0.0f, 0.0f);
glVertex3f(-0.5f, -0.5f, 0.0f);
We can therefore see that this represents a much more restricted model, since the positions and colors are no longer arbitrary user-defined arrays in memory, but rather just inputs to a fixed, Phong-like lighting model.
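As an illustration of that restriction, here is a minimal sketch of legacy fixed-function lighting: you can only tweak predefined parameters of the built-in model through dedicated calls, rather than write arbitrary shader code (the parameter values below are made up for illustration):

/* Sketch: legacy fixed-function lighting. Only predefined parameters of the
 * built-in lighting model can be set; the shading math itself is fixed. */
GLfloat light_pos[] = { 1.0f, 1.0f, 1.0f, 0.0f };  /* directional light */
GLfloat diffuse[]   = { 1.0f, 0.0f, 0.0f, 1.0f };  /* red diffuse material */
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glLightfv(GL_LIGHT0, GL_POSITION, light_pos);
glMaterialfv(GL_FRONT, GL_DIFFUSE, diffuse);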
In both cases, the rendered output normally goes straight to video memory and the screen, without passing back through the CPU, although it is possible to read the pixels back to the CPU, e.g. if you want to save them to a file: How to use GLUT/OpenGL to render to a file?
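For example, a minimal read-back sketch with glReadPixels (assuming an RGBA framebuffer and the WIDTH/HEIGHT constants used above) might look like:

/* Sketch: copy the framebuffer back into CPU memory. */
unsigned char *pixels = malloc(4 * WIDTH * HEIGHT);
glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
/* ... write `pixels` out as e.g. a PPM or PNG file ... */
free(pixels);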
Most "modern" OpenGL tutorials normally retained mode and GLFW, you will find many examples at:
- https://github.com/JoeyDeVries/LearnOpenGL
- https://github.com/opengl-tutorials/ogl
- https://github.com/capnramses/antons_opengl_tutorials_book
- https://github.com/Overv/Open.GL/tree/master/content/code
- https://github.com/tomdalling/opengl-series
One example of "immediate mode" is using glBegin and glEnd with glVertex in between them. Another example of "immediate mode" is to use glDrawArrays with a client vertex array (i.e. not a vertex buffer object).
You will usually never want to use immediate mode (except maybe for your first "hello world" program) because it is deprecated functionality and does not offer optimal performance.
The reason why immediate mode is not optimal is that the graphics card is linked directly with your program's flow. The driver cannot tell the GPU to start rendering before glEnd, because it does not know when you will be finished submitting data, and it needs to transfer that data too (which it can only do after glEnd).
Similarly, with a client vertex array, the driver can only pull a copy of your array the moment you call glDrawArrays, and it must block your application while doing so. The reason is that otherwise you could modify (or free) the array's memory before the driver has captured it. It cannot schedule that operation any earlier or later, because it only knows that the data is valid exactly at one point in time.
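For reference, a client vertex array draw in legacy OpenGL looks roughly like this (a sketch, not taken from the answer; the array name is just illustrative):

/* Sketch: client-side vertex array, no VBO. The pointer refers to
 * application memory, so the driver has to copy it during glDrawArrays. */
static const GLfloat verts2d[] = {
     0.0f,   1.0f,
     0.87f, -0.5f,
    -0.87f, -0.5f,
};
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, verts2d);  /* data stays in client memory */
glDrawArrays(GL_TRIANGLES, 0, 3);          /* driver must copy it here */
glDisableClientState(GL_VERTEX_ARRAY);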
In contrast to that, if you use, for example, a vertex buffer object, you fill a buffer with data and hand it to OpenGL. Your process no longer owns this data and can therefore no longer modify it. The driver can rely on this fact and can (even speculatively) upload the data whenever the bus is free.
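A short sketch of that hand-over (names are illustrative): once glBufferData has copied the data into the GL-owned buffer, the client copy can be freed without affecting it:

/* Sketch: after glBufferData, OpenGL owns its own copy of the data. */
GLuint vbo;
GLsizeiptr size = 9 * sizeof(GLfloat);
GLfloat *tmp = malloc(size);
/* ... fill tmp with vertex data ... */
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, size, tmp, GL_STATIC_DRAW);
free(tmp);  /* safe: the driver no longer depends on this memory */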
Any of your later glDrawArrays or glDrawElements calls will just go into a work queue and return immediately (before actually finishing!), so your program keeps submitting commands while at the same time the driver works them off one by one. They also likely won't need to wait for the data to arrive, because the driver could already do that much earlier.
Thus, the render thread and the GPU run asynchronously and every component is busy at all times, which yields better performance.
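You can observe this asynchrony yourself with a sync object (GL 3.2+); a rough sketch:

/* Sketch: the draw call returns immediately; a fence tells you when the
 * GPU has actually finished the queued work. */
glDrawArrays(GL_TRIANGLES, 0, 3);  /* queued, returns right away */
GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
/* ... the CPU is free to do other work here ... */
glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT,
                 1000000000);      /* block up to 1 s until the GPU is done */
glDeleteSync(fence);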
Immediate mode does have the advantage of being dead simple to use, but then again using OpenGL properly in a non-deprecated way is not precisely rocket science either -- it only takes very little extra work.
Here is the typical OpenGL "Hello World" code in immediate mode:
glBegin(GL_TRIANGLES);
glColor3f(1.0f, 0.0f, 0.0f); glVertex2f(0.0f, 1.0f);
glColor3f(0.0f, 1.0f, 0.0f); glVertex2f(0.87f, -0.5f);
glColor3f(0.0f, 0.0f, 1.0f); glVertex2f(-0.87f, -0.5f);
glEnd();
Edit:
By common request, the same thing in retained mode would look somewhat like this:
float verts[] = {...};
float colors[] = {...};
static_assert(sizeof(verts) == sizeof(colors), "");
// not really needed for this example, but mandatory in core profile after GL 3.2
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLuint buf[2];
glGenBuffers(2, buf);
// assuming a layout(location = 0) for position and
// layout(location = 1) for color in the vertex shader
// vertex positions
glBindBuffer(GL_ARRAY_BUFFER, buf[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
// copy/paste for color... same code as above. A real, non-trivial program would
// normally use a single buffer for both -- usually with stride (5th param) to
// glVertexAttribPointer -- that presumes interleaving the verts and colors arrays.
// It's somewhat uglier but has better cache performance (ugly does however not
// matter for a real program, since data is loaded from a modelling-tool generated
// binary file anyway).
glBindBuffer(GL_ARRAY_BUFFER, buf[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(colors), colors, GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
glDrawArrays(GL_TRIANGLES, 0, 3);
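A matching vertex shader for those two attribute locations could look like the one from the runnable example above (a sketch; the output name vColor is just illustrative):

static const GLchar* vertexShaderSource =
    "#version 330 core\n"
    "layout (location = 0) in vec3 position;\n"
    "layout (location = 1) in vec3 color;\n"
    "out vec3 vColor;\n"
    "void main() {\n"
    "    gl_Position = vec4(position, 1.0);\n"
    "    vColor = color;\n"
    "}\n";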