17 changes: 17 additions & 0 deletions README.md
@@ -113,6 +113,23 @@ rep+ is a lightweight Chrome DevTools extension inspired by Burp Suite's Repeater

This combo makes rep+ handy for bug bounty hunters and vulnerability researchers who want Burp-like iteration without the heavyweight UI. Install the extension, open DevTools, head to the rep+ panel, and start hacking. 😎

### Local Model (Ollama) Setup
If you use a local model (e.g., Ollama), you must allow Chrome extensions to call it; otherwise you’ll see 403/CORS errors.

1. Stop any running Ollama instance.
2. Start Ollama with CORS enabled (pick one):
- Allow only Chrome extensions:
```bash
OLLAMA_ORIGINS="chrome-extension://*" ollama serve
```
- Allow everything (easier for local dev):
```bash
OLLAMA_ORIGINS="*" ollama serve
```
3. Verify your model exists (e.g., `gemma3:4b`) with `ollama list`.
4. Reload the extension and try again. If you still see 403, check the Ollama logs for details (a quick curl check is sketched below).
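
To confirm CORS is actually enabled before going back to the extension, you can send a request with an extension-style `Origin` header and look for `Access-Control-Allow-Origin` in the response. This is only a sketch: it assumes the default Ollama port `11434`, a pulled `gemma3:4b` model, and a made-up extension ID.

```bash
# Hypothetical check: the Origin below is a placeholder extension ID.
# A CORS-enabled Ollama should answer 200 with an Access-Control-Allow-Origin header;
# a blocked origin typically comes back as 403.
curl -i http://localhost:11434/api/generate \
  -H 'Origin: chrome-extension://abcdefghijklmnopabcdefghijklmnop' \
  -H 'Content-Type: application/json' \
  -d '{"model": "gemma3:4b", "prompt": "ping", "stream": false}'
```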


## Permissions & Privacy
- **Optional**: `webRequest` + `<all_urls>` only when you enable multi-tab capture.
- **Data**: Stored locally; no tracking/analytics.
259 changes: 258 additions & 1 deletion background.js
@@ -13,14 +13,271 @@ chrome.runtime.onConnect.addListener((port) => {
ports.delete(port);
});

// Listen for messages from panel (e.g. to toggle capture)
// Listen for messages from panel (e.g. to toggle capture, local model requests)
port.onMessage.addListener((msg) => {
console.log('Background: Received port message:', msg.type);
if (msg.type === 'ping') {
console.log('Background: Responding to ping');
port.postMessage({ type: 'pong' });
} else if (msg.type === 'local-model-request') {
// Handle local model request via port
const requestId = msg.requestId || `local-${Date.now()}-${Math.random()}`;
console.log('Background: Received local model request', requestId, 'URL:', msg.url, 'Body:', JSON.stringify(msg.body).substring(0, 100));

// chrome.runtime.Port exposes no "connected" flag, so this can only catch a
// missing port; postMessage on a closed port throws, and the try/catch blocks
// below handle disconnects that happen mid-request.
if (!port) {
console.error('Background: Port already disconnected');
return;
}

// Keep service worker alive during request
const keepAlive = setInterval(() => {
// Service worker will stay alive as long as we have active work
}, 1000);

// Proxy the request to localhost
// Note: Service workers need host_permissions for localhost in MV3
const requestBody = {
model: msg.body.model,
prompt: msg.body.prompt,
stream: msg.body.stream !== undefined ? msg.body.stream : true
};

console.log('Background: Sending fetch request to', msg.url, 'with body:', JSON.stringify(requestBody).substring(0, 200));

// Try to match curl's request format exactly
fetch(msg.url, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json'
},
body: JSON.stringify(requestBody),
// Don't send credentials or referrer that might trigger security
credentials: 'omit',
referrerPolicy: 'no-referrer'
})
.then(response => {
console.log('Background: Fetch response status', response.status);
// Log response headers for debugging
const responseHeaders = {};
response.headers.forEach((value, key) => {
responseHeaders[key] = value;
});
console.log('Background: Response headers:', responseHeaders);

if (!response.ok) {
return response.text().then(text => {
console.error('Background: Fetch failed with status', response.status, 'Response body length:', text?.length || 0, 'Response body:', text || '(empty)');
// Provide more helpful error message
let errorMsg = `Request failed with status ${response.status}`;
if (text && text.trim()) {
try {
const errorData = JSON.parse(text);
errorMsg = errorData.error || errorData.message || errorMsg;
} catch (e) {
errorMsg = text.length > 200 ? text.substring(0, 200) + '...' : text;
}
} else if (response.status === 403) {
errorMsg = '403 Forbidden: Ollama is blocking the request. ' +
'This might be due to CORS or security settings. ' +
'Try restarting Ollama with: OLLAMA_ORIGINS="*" ollama serve ' +
'Or check Ollama configuration for access restrictions.';
}
throw new Error(errorMsg);
});
}
return response.body;
})
.then(body => {
if (!body) {
throw new Error('No response body received');
}

// Stream the response back via this specific port
const reader = body.getReader();
const decoder = new TextDecoder();
let hasError = false;

function readChunk() {
if (hasError) return;

reader.read().then(({ done, value }) => {
if (done) {
// Send final message
clearInterval(keepAlive);
try {
port.postMessage({
type: 'local-model-stream-done',
requestId: requestId
});
console.log('Background: Sent stream-done for', requestId);
} catch (e) {
console.error('Background: Error sending stream-done', e);
hasError = true;
}
return;
}

const chunk = decoder.decode(value, { stream: true });
// Send chunk message
try {
port.postMessage({
type: 'local-model-stream-chunk',
chunk: chunk,
requestId: requestId
});
} catch (e) {
console.error('Background: Port disconnected during streaming', e);
hasError = true;
reader.cancel().catch(() => {});
return;
}

// Continue reading
readChunk();
}).catch(error => {
clearInterval(keepAlive);
console.error('Background: Error reading chunk', error);
hasError = true;
try {
port.postMessage({
type: 'local-model-stream-error',
error: error.message,
requestId: requestId
});
} catch (e) {
console.error('Background: Error sending error message', e);
}
});
}

readChunk();
})
.catch(error => {
clearInterval(keepAlive);
console.error('Background: Fetch error', error, error.stack);
let errorMessage = error.message || 'Failed to fetch from local model API';

// Provide helpful error message for CORS issues
if (errorMessage.includes('CORS') || errorMessage.includes('Failed to fetch')) {
errorMessage = 'CORS error: Ollama needs to allow CORS. ' +
'Start Ollama with: OLLAMA_ORIGINS="chrome-extension://*" ollama serve ' +
'Or configure your Ollama server to send CORS headers. ' +
'Original error: ' + errorMessage;
}

try {
port.postMessage({
type: 'local-model-error',
error: errorMessage,
requestId: requestId
});
} catch (e) {
console.error('Background: Port disconnected, cannot send error', e);
}
});
}
});
});

// Handle local model API requests (bypass CORS)
chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
if (request.type === 'local-model-request') {
const requestId = request.requestId || `local-${Date.now()}-${Math.random()}`;

// Proxy the request to localhost (service workers can bypass CORS)
fetch(request.url, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(request.body)
})
.then(response => {
if (!response.ok) {
return response.text().then(text => {
throw new Error(text || 'Request failed');
});
}
return response.body;
})
.then(body => {
// Stream the response back via port connections (for DevTools panels)
const reader = body.getReader();
const decoder = new TextDecoder();

function readChunk() {
reader.read().then(({ done, value }) => {
if (done) {
// Send final message to all connected ports
ports.forEach(port => {
try {
port.postMessage({
type: 'local-model-stream-done',
requestId: requestId
});
} catch (e) {
// Port might be disconnected, remove it
ports.delete(port);
}
});
return;
}

const chunk = decoder.decode(value, { stream: true });
// Send chunk message to all connected ports
ports.forEach(port => {
try {
port.postMessage({
type: 'local-model-stream-chunk',
chunk: chunk,
requestId: requestId
});
} catch (e) {
// Port might be disconnected, remove it
ports.delete(port);
}
});

// Continue reading
readChunk();
}).catch(error => {
ports.forEach(port => {
try {
port.postMessage({
type: 'local-model-stream-error',
error: error.message,
requestId: requestId
});
} catch (e) {
ports.delete(port);
}
});
});
}

readChunk();
})
.catch(error => {
ports.forEach(port => {
try {
port.postMessage({
type: 'local-model-error',
error: error.message,
requestId: requestId
});
} catch (e) {
ports.delete(port);
}
});
});

// Keep the message channel open; results are streamed back asynchronously over the connected DevTools ports rather than via sendResponse
return true;
}
});

// Helper to process request body
function parseRequestBody(requestBody) {
if (!requestBody) return null;
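For context on how the other half of this port protocol could look, below is a hypothetical panel-side sketch (not part of this PR). It assumes the panel connects via `chrome.runtime.connect`, uses the message types handled in `background.js` above, and targets Ollama's `/api/generate` endpoint, which streams newline-delimited JSON objects with a `response` field; the port name, `askLocalModel`, and the model are illustrative.

```javascript
// Hypothetical panel-side counterpart to the background proxy above.
// The port name, function names, and model are illustrative.
const port = chrome.runtime.connect({ name: 'repplus-panel' });

function askLocalModel(prompt, onToken) {
  const requestId = `local-${Date.now()}-${Math.random()}`;
  return new Promise((resolve, reject) => {
    let full = '';
    const listener = (msg) => {
      if (msg.requestId !== requestId) return;
      if (msg.type === 'local-model-stream-chunk') {
        // Assumes Ollama's NDJSON streaming: each line is a JSON object
        // whose `response` field holds the next token.
        for (const line of msg.chunk.split('\n')) {
          if (!line.trim()) continue;
          try {
            const token = JSON.parse(line).response || '';
            full += token;
            if (onToken) onToken(token);
          } catch (e) {
            // A line may be split across chunks; a real panel would buffer it.
          }
        }
      } else if (msg.type === 'local-model-stream-done') {
        port.onMessage.removeListener(listener);
        resolve(full);
      } else if (msg.type === 'local-model-stream-error' || msg.type === 'local-model-error') {
        port.onMessage.removeListener(listener);
        reject(new Error(msg.error));
      }
    };
    port.onMessage.addListener(listener);
    port.postMessage({
      type: 'local-model-request',
      requestId,
      url: 'http://localhost:11434/api/generate',
      body: { model: 'gemma3:4b', prompt, stream: true }
    });
  });
}

// Example usage from the panel:
// askLocalModel('Summarize this HTTP response', (t) => console.log(t))
//   .then((text) => console.log('Done:', text));
```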