Let's say we have the following scheme using C++ and virtual functions:

// Abstract base class for all DSP nodes. Concrete processors and
// combinators (sequential, parallel) derive from it and implement Compute.
class DSP {

public:

DSP() {}

// Virtual destructor so that deleting through a DSP* destroys the
// most-derived object (the graph is always manipulated via DSP*).
virtual ~DSP() {}

// Process `count` frames, reading from the `in` channel buffers and
// writing to the `out` channel buffers; returns a status code.
virtual int Compute(int count, float** in, float** out) = 0;

};

// Leaf DSP node: performs its own processing in Compute.
class CONCRETE_DSP : public DSP {

private:

// Bug fix: the constructor value-initializes fValue, but the original
// snippet never declared the member — declare it here.
float fValue;

public:

CONCRETE_DSP():fValue() {}

virtual ~CONCRETE_DSP() {}

// Process `count` frames from `in` into `out`; returns a status code.
virtual int Compute(int count, float** in, float** out)
{
    DoSomeProcess();
    // Bug fix: the function is declared to return int but the original
    // fell off the end of the function, which is undefined behavior
    // whenever a caller uses the result. Return a success code.
    return 0;
}

};

// Sequential combinator: runs fArg1 then fArg2 over the same buffers.
// Owns both children and deletes them in the destructor.
class SEQ_DDSP : public DSP {

private:

DSP* fArg1;

DSP* fArg2;

// Bug fix: the class owns raw pointers and deletes them in the
// destructor, so the implicit copy operations would cause a double
// delete. Declare them private and leave them undefined (pre-C++11
// non-copyable idiom, matching the style of this code).
SEQ_DDSP(const SEQ_DDSP&);
SEQ_DDSP& operator=(const SEQ_DDSP&);

public:

// Takes ownership of both child nodes.
SEQ_DDSP(DSP* a1, DSP* a2):fArg1(a1), fArg2(a2) {}

virtual ~SEQ_DDSP() {delete fArg1; delete fArg2;}

virtual int Compute(int count, float** in, float** out)
{
    // Some code that uses:
    fArg1->Compute(count, in, out);
    // Bug fix: propagate a return value; the original fell off the end
    // of a non-void function (undefined behavior).
    return fArg2->Compute(count, in, out);
}

};

// Parallel combinator: runs fArg1 and fArg2 over the same buffers.
// Owns both children and deletes them in the destructor.
class PAR_DSP : public DSP {

private:

DSP* fArg1;

DSP* fArg2;

// Bug fix: the class owns raw pointers and deletes them in the
// destructor, so the implicit copy operations would cause a double
// delete. Declare them private and leave them undefined (pre-C++11
// non-copyable idiom, matching the style of this code).
PAR_DSP(const PAR_DSP&);
PAR_DSP& operator=(const PAR_DSP&);

public:

// Takes ownership of both child nodes.
PAR_DSP(DSP* a1, DSP* a2):fArg1(a1), fArg2(a2) {}

virtual ~PAR_DSP() {delete fArg1; delete fArg2;}

virtual int Compute(int count, float** in, float** out)
{
    // Some code that uses:
    fArg1->Compute(count, in, out);
    // Bug fix: propagate a return value; the original fell off the end
    // of a non-void function (undefined behavior).
    return fArg2->Compute(count, in, out);
}

};

void ProcessGraph (float** in, float** out)

{

DSP* graph = new PAR_DSP(new SEQ_DDSP(new CONCRETE_DSP(), new CONCRETE_DSP()), new CONCRETE_DSP());

graph->Compute(512, in, out);

delete graph;

}

At runtime, after a graph is created, one could imagine optimizing it by resolving the calls to the virtual Compute method, possibly obtaining a more efficient Compute method for the entire graph, so that we could write:

DSP* graph = new PAR_DSP(new SEQ_DDSP(new CONCRETE_DSP(), new CONCRETE_DSP()), new CONCRETE_DSP());

graph->Optimize();

graph->Compute(512, in, out); // possibly called a large number of times

Is there any possible method using LLVM that would help in this case?

Thanks

Stephane Letz